linuxppc-dev.lists.ozlabs.org archive mirror
 help / color / mirror / Atom feed
* ppc32/kprobe: Fix a bug for kprobe stwu r1
@ 2011-12-12  8:50 Tiejun Chen
  2011-12-12  8:50 ` [PATCH 1/4] powerpc/kprobe: introduce a new thread flag Tiejun Chen
                   ` (3 more replies)
  0 siblings, 4 replies; 16+ messages in thread
From: Tiejun Chen @ 2011-12-12  8:50 UTC (permalink / raw)
  To: benh, linuxppc-dev

ppc32/kprobe: Fix a bug for kprobe stwu r1

These patches are used to fix that known kprobe bug,
[BUG?]3.0-rc4+ftrace+kprobe: set kprobe at instruction 'stwu' lead to system crash/freeze

https://lkml.org/lkml/2011/7/3/156

We withdraw the original way to provide a dedicated exception stack. Now we
implement this based on Ben's suggestion:

https://lkml.org/lkml/2011/11/30/327

Here I fix this bug only for ppc32 since Ben addressed another problem in the ppc64
exception return code. So I think I'd better send another patch to fix the
bug arising from ppc64 first. Then it's convenient to merge this fix into ppc64.

Tiejun Chen (4):
      powerpc/kprobe: introduce a new thread flag
      ppc32/kprobe: introduce copy_exc_stack
      ppc32/kprobe: complete kprobe and migrate exception frame
      ppc32/kprobe: don't emulate store when kprobe stwu r1

 arch/powerpc/include/asm/page_32.h     |    1 +
 arch/powerpc/include/asm/thread_info.h |    2 ++
 arch/powerpc/kernel/entry_32.S         |   26 ++++++++++++++++++++++++++
 arch/powerpc/kernel/misc_32.S          |   16 +++++++++++++++-
 arch/powerpc/kernel/ppc_ksyms.c        |    1 +
 arch/powerpc/lib/sstep.c               |   19 +++++++++++++++++--
 6 files changed, 62 insertions(+), 3 deletions(-)

Tiejun

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 1/4] powerpc/kprobe: introduce a new thread flag
  2011-12-12  8:50 ppc32/kprobe: Fix a bug for kprobe stwu r1 Tiejun Chen
@ 2011-12-12  8:50 ` Tiejun Chen
  2011-12-12 22:58   ` Benjamin Herrenschmidt
  2011-12-12  8:50 ` [PATCH 2/4] ppc32/kprobe: introduce copy_exc_stack Tiejun Chen
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 16+ messages in thread
From: Tiejun Chen @ 2011-12-12  8:50 UTC (permalink / raw)
  To: benh, linuxppc-dev

We need to add a new thread flag, TIF_KPROBE/_TIF_DELAYED_KPROBE,
for handling the kprobe operation while exiting an exception.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/include/asm/thread_info.h |    2 ++
 1 files changed, 2 insertions(+), 0 deletions(-)

diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 836f231..3378734 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -112,6 +112,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_FREEZE		14	/* Freezing for suspend */
 #define TIF_SYSCALL_TRACEPOINT	15	/* syscall tracepoint instrumentation */
 #define TIF_RUNLATCH		16	/* Is the runlatch enabled? */
+#define TIF_KPROBE		17	/* Is the delayed kprobe operation? */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -130,6 +131,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_FREEZE		(1<<TIF_FREEZE)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_RUNLATCH		(1<<TIF_RUNLATCH)
+#define _TIF_DELAYED_KPROBE	(1<<TIF_KPROBE)
 #define _TIF_SYSCALL_T_OR_A	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
 
-- 
1.5.6

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 2/4] ppc32/kprobe: introduce copy_exc_stack
  2011-12-12  8:50 ppc32/kprobe: Fix a bug for kprobe stwu r1 Tiejun Chen
  2011-12-12  8:50 ` [PATCH 1/4] powerpc/kprobe: introduce a new thread flag Tiejun Chen
@ 2011-12-12  8:50 ` Tiejun Chen
  2011-12-12 23:01   ` Benjamin Herrenschmidt
  2011-12-12  8:50 ` [PATCH 3/4] ppc32/kprobe: complete kprobe and migrate exception frame Tiejun Chen
  2011-12-12  8:50 ` [PATCH 4/4] ppc32/kprobe: don't emulate store when kprobe stwu r1 Tiejun Chen
  3 siblings, 1 reply; 16+ messages in thread
From: Tiejun Chen @ 2011-12-12  8:50 UTC (permalink / raw)
  To: benh, linuxppc-dev

We need a copy mechanism to migrate the exception stack. But it looks like
copy_page() already implements this well, so we can implement copy_exc_stack()
directly based on it.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/include/asm/page_32.h |    1 +
 arch/powerpc/kernel/misc_32.S      |   16 +++++++++++++++-
 arch/powerpc/kernel/ppc_ksyms.c    |    1 +
 3 files changed, 17 insertions(+), 1 deletions(-)

diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index 68d73b2..2c1fd84 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -40,6 +40,7 @@ struct page;
 extern void clear_pages(void *page, int order);
 static inline void clear_page(void *page) { clear_pages(page, 0); }
 extern void copy_page(void *to, void *from);
+extern void copy_exc_stack(void *to, void *from);
 
 #include <asm-generic/getorder.h>
 
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 998a100..aa02545 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -527,7 +527,7 @@ _GLOBAL(clear_pages)
 	stw	r8,12(r3);	\
 	stwu	r9,16(r3)
 
-_GLOBAL(copy_page)
+ready_copy:
 	addi	r3,r3,-4
 	addi	r4,r4,-4
 
@@ -544,7 +544,21 @@ _GLOBAL(copy_page)
 	dcbt	r5,r4
 	li	r11,L1_CACHE_BYTES+4
 #endif /* MAX_COPY_PREFETCH */
+	blr
+
+_GLOBAL(copy_exc_stack)
+	mflr	r12
+	bl	ready_copy
+	mtlr	r12
+	li	r0,INT_FRAME_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
+	b	go_copy
+
+_GLOBAL(copy_page)
+	mflr	r12
+	bl	ready_copy
+	mtlr	r12
 	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
+go_copy:
 	crclr	4*cr0+eq
 2:
 	mtctr	r0
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index f5ae872..2223daf 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -88,6 +88,7 @@ EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(copy_exc_stack);
 
 #if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
 EXPORT_SYMBOL(isa_io_base);
-- 
1.5.6

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 3/4] ppc32/kprobe: complete kprobe and migrate exception frame
  2011-12-12  8:50 ppc32/kprobe: Fix a bug for kprobe stwu r1 Tiejun Chen
  2011-12-12  8:50 ` [PATCH 1/4] powerpc/kprobe: introduce a new thread flag Tiejun Chen
  2011-12-12  8:50 ` [PATCH 2/4] ppc32/kprobe: introduce copy_exc_stack Tiejun Chen
@ 2011-12-12  8:50 ` Tiejun Chen
  2011-12-12 23:19   ` Benjamin Herrenschmidt
  2011-12-12  8:50 ` [PATCH 4/4] ppc32/kprobe: don't emulate store when kprobe stwu r1 Tiejun Chen
  3 siblings, 1 reply; 16+ messages in thread
From: Tiejun Chen @ 2011-12-12  8:50 UTC (permalink / raw)
  To: benh, linuxppc-dev

We can't emulate stwu since that may corrupt the current exception stack.
So we will have to do the real store operation in the exception return code.

Firstly we'll allocate a trampoline exception frame below the kprobed
function stack and copy the current exception frame to the trampoline.
Then we can do this real store operation to implement 'stwu', and reroute
the trampoline frame to r1 to complete this exception migration.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/kernel/entry_32.S |   26 ++++++++++++++++++++++++++
 1 files changed, 26 insertions(+), 0 deletions(-)

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 56212bc..d56e311 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -1185,6 +1185,8 @@ recheck:
 	bne-	do_resched
 	andi.	r0,r9,_TIF_USER_WORK_MASK
 	beq	restore_user
+	andis.	r0,r9,_TIF_DELAYED_KPROBE@h
+	bne-	restore_kprobe
 do_user_signal:			/* r10 contains MSR_KERNEL here */
 	ori	r10,r10,MSR_EE
 	SYNC
@@ -1202,6 +1204,30 @@ do_user_signal:			/* r10 contains MSR_KERNEL here */
 	REST_NVGPRS(r1)
 	b	recheck
 
+restore_kprobe:
+	lwz	r3,GPR1(r1)
+	subi    r3,r3,INT_FRAME_SIZE; /* Allocate a trampoline exception frame */
+	mr	r4,r1
+	bl	copy_exc_stack	/* Copy from the original to the trampoline */
+
+	/* Do real stw operation to complete stwu */
+	mr	r4,r1
+	addi	r4,r4,INT_FRAME_SIZE	/* Get kprobed entry */
+	lwz	r5,GPR1(r1)		/* Backup r1 */
+	stw	r4,GPR1(r1)		/* Now store that safely */
+
+	/* Reroute the trampoline frame to r1 */
+	subi    r5,r5,INT_FRAME_SIZE
+	mr	r1,r5
+
+	/* Clear _TIF_DELAYED_KPROBE flag */
+	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
+	lwz	r0,TI_FLAGS(r9)
+	rlwinm	r0,r0,0,_TIF_DELAYED_KPROBE
+	stw	r0,TI_FLAGS(r9)
+
+	b	restore
+
 /*
  * We come here when we are at the end of handling an exception
  * that occurred at a place where taking an exception will lose
-- 
1.5.6

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 4/4] ppc32/kprobe: don't emulate store when kprobe stwu r1
  2011-12-12  8:50 ppc32/kprobe: Fix a bug for kprobe stwu r1 Tiejun Chen
                   ` (2 preceding siblings ...)
  2011-12-12  8:50 ` [PATCH 3/4] ppc32/kprobe: complete kprobe and migrate exception frame Tiejun Chen
@ 2011-12-12  8:50 ` Tiejun Chen
  3 siblings, 0 replies; 16+ messages in thread
From: Tiejun Chen @ 2011-12-12  8:50 UTC (permalink / raw)
  To: benh, linuxppc-dev

We don't do the real store operation for kprobing 'stwu Rx,(y)R1'
since this may corrupt the exception frame. Instead, we now do this
operation safely in the exception return code, after migrating the
current exception frame below the kprobed function stack.

So we only update gpr[1] here and set a thread flag to mark
this.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/lib/sstep.c |   19 +++++++++++++++++--
 1 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 9a52349..78b7168 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -566,7 +566,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 	unsigned long int ea;
 	unsigned int cr, mb, me, sh;
 	int err;
-	unsigned long old_ra;
+	unsigned long old_ra, val3;
 	long ival;
 
 	opcode = instr >> 26;
@@ -1486,10 +1486,25 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		goto ldst_done;
 
 	case 36:	/* stw */
-	case 37:	/* stwu */
 		val = regs->gpr[rd];
 		err = write_mem(val, dform_ea(instr, regs), 4, regs);
 		goto ldst_done;
+	case 37:	/* stwu */
+		val = regs->gpr[rd];
+		val3 = dform_ea(instr, regs);
+		/* For PPC32 we always use stwu to change stack point with r1. So
+		 * this emulated store may corrupt the exception frame, now we
+		 * have to provide the exception frame trampoline, which is pushed
+		 * below the kprobed function stack. So we only update gpr[1] but
+		 * don't emulate the real store operation. We will do real store
+		 * operation safely in exception return code by checking this flag.
+		 */
+		if (ra == 1) {
+			set_thread_flag(TIF_KPROBE);
+			err = 0;
+		} else
+			err = write_mem(val, val3, 4, regs);
+		goto ldst_done;
 
 	case 38:	/* stb */
 	case 39:	/* stbu */
-- 
1.5.6

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/4] powerpc/kprobe: introduce a new thread flag
  2011-12-12  8:50 ` [PATCH 1/4] powerpc/kprobe: introduce a new thread flag Tiejun Chen
@ 2011-12-12 22:58   ` Benjamin Herrenschmidt
  2011-12-13  4:56     ` tiejun.chen
  0 siblings, 1 reply; 16+ messages in thread
From: Benjamin Herrenschmidt @ 2011-12-12 22:58 UTC (permalink / raw)
  To: Tiejun Chen; +Cc: linuxppc-dev

On Mon, 2011-12-12 at 16:50 +0800, Tiejun Chen wrote:
> We need to add a new thread flag, TIF_KPROBE/_TIF_DELAYED_KPROBE,
> for handling kprobe operation while exiting exception.

The basic idea is sane, however the instruction emulation isn't per-se
kprobe specific. It could be used by xmon too for example. I'd rather
use a different name, something like TIF_EMULATE_STACK_STORE or
something like that.

Cheers,
Ben.

> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
> ---
>  arch/powerpc/include/asm/thread_info.h |    2 ++
>  1 files changed, 2 insertions(+), 0 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
> index 836f231..3378734 100644
> --- a/arch/powerpc/include/asm/thread_info.h
> +++ b/arch/powerpc/include/asm/thread_info.h
> @@ -112,6 +112,7 @@ static inline struct thread_info *current_thread_info(void)
>  #define TIF_FREEZE		14	/* Freezing for suspend */
>  #define TIF_SYSCALL_TRACEPOINT	15	/* syscall tracepoint instrumentation */
>  #define TIF_RUNLATCH		16	/* Is the runlatch enabled? */
> +#define TIF_KPROBE		17	/* Is the delayed kprobe operation? */
>  
>  /* as above, but as bit values */
>  #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
> @@ -130,6 +131,7 @@ static inline struct thread_info *current_thread_info(void)
>  #define _TIF_FREEZE		(1<<TIF_FREEZE)
>  #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
>  #define _TIF_RUNLATCH		(1<<TIF_RUNLATCH)
> +#define _TIF_DELAYED_KPROBE	(1<<TIF_KPROBE)
>  #define _TIF_SYSCALL_T_OR_A	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
>  				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
>  

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 2/4] ppc32/kprobe: introduce copy_exc_stack
  2011-12-12  8:50 ` [PATCH 2/4] ppc32/kprobe: introduce copy_exc_stack Tiejun Chen
@ 2011-12-12 23:01   ` Benjamin Herrenschmidt
  2011-12-13  4:58     ` tiejun.chen
  0 siblings, 1 reply; 16+ messages in thread
From: Benjamin Herrenschmidt @ 2011-12-12 23:01 UTC (permalink / raw)
  To: Tiejun Chen; +Cc: linuxppc-dev

On Mon, 2011-12-12 at 16:50 +0800, Tiejun Chen wrote:
> We need a copy mechanism to migrate exception stack. But looks copy_page()
> already implement this well so we can complete copy_exc_stack() based on
> that directly.

I'd rather you don't hijack copy_page which is quite sensitive. The
emulation isn't performance critical so a "dumber" routine would work
fine.

Why not use memcpy ? You can call it from assembly.

Cheers,
Ben.

> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
> ---
>  arch/powerpc/include/asm/page_32.h |    1 +
>  arch/powerpc/kernel/misc_32.S      |   16 +++++++++++++++-
>  arch/powerpc/kernel/ppc_ksyms.c    |    1 +
>  3 files changed, 17 insertions(+), 1 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
> index 68d73b2..2c1fd84 100644
> --- a/arch/powerpc/include/asm/page_32.h
> +++ b/arch/powerpc/include/asm/page_32.h
> @@ -40,6 +40,7 @@ struct page;
>  extern void clear_pages(void *page, int order);
>  static inline void clear_page(void *page) { clear_pages(page, 0); }
>  extern void copy_page(void *to, void *from);
> +extern void copy_exc_stack(void *to, void *from);
>  
>  #include <asm-generic/getorder.h>
>  
> diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
> index 998a100..aa02545 100644
> --- a/arch/powerpc/kernel/misc_32.S
> +++ b/arch/powerpc/kernel/misc_32.S
> @@ -527,7 +527,7 @@ _GLOBAL(clear_pages)
>  	stw	r8,12(r3);	\
>  	stwu	r9,16(r3)
>  
> -_GLOBAL(copy_page)
> +ready_copy:
>  	addi	r3,r3,-4
>  	addi	r4,r4,-4
>  
> @@ -544,7 +544,21 @@ _GLOBAL(copy_page)
>  	dcbt	r5,r4
>  	li	r11,L1_CACHE_BYTES+4
>  #endif /* MAX_COPY_PREFETCH */
> +	blr
> +
> +_GLOBAL(copy_exc_stack)
> +	mflr	r12
> +	bl	ready_copy
> +	mtlr	r12
> +	li	r0,INT_FRAME_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
> +	b	go_copy
> +
> +_GLOBAL(copy_page)
> +	mflr	r12
> +	bl	ready_copy
> +	mtlr	r12
>  	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
> +go_copy:
>  	crclr	4*cr0+eq
>  2:
>  	mtctr	r0
> diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
> index f5ae872..2223daf 100644
> --- a/arch/powerpc/kernel/ppc_ksyms.c
> +++ b/arch/powerpc/kernel/ppc_ksyms.c
> @@ -88,6 +88,7 @@ EXPORT_SYMBOL(__clear_user);
>  EXPORT_SYMBOL(__strncpy_from_user);
>  EXPORT_SYMBOL(__strnlen_user);
>  EXPORT_SYMBOL(copy_page);
> +EXPORT_SYMBOL(copy_exc_stack);
>  
>  #if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
>  EXPORT_SYMBOL(isa_io_base);

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/4] ppc32/kprobe: complete kprobe and migrate exception frame
  2011-12-12  8:50 ` [PATCH 3/4] ppc32/kprobe: complete kprobe and migrate exception frame Tiejun Chen
@ 2011-12-12 23:19   ` Benjamin Herrenschmidt
  2011-12-13  4:54     ` tiejun.chen
  0 siblings, 1 reply; 16+ messages in thread
From: Benjamin Herrenschmidt @ 2011-12-12 23:19 UTC (permalink / raw)
  To: Tiejun Chen; +Cc: linuxppc-dev

On Mon, 2011-12-12 at 16:50 +0800, Tiejun Chen wrote:
> We can't emulate stwu since that may corrupt current exception stack.
> So we will have to do real store operation in the exception return code.
> 
> Firstly we'll allocate a trampoline exception frame below the kprobed
> function stack and copy the current exception frame to the trampoline.
> Then we can do this real store operation to implement 'stwu', and reroute
> the trampoline frame to r1 to complete this exception migration.
> 
> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
> ---
>  arch/powerpc/kernel/entry_32.S |   26 ++++++++++++++++++++++++++
>  1 files changed, 26 insertions(+), 0 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
> index 56212bc..d56e311 100644
> --- a/arch/powerpc/kernel/entry_32.S
> +++ b/arch/powerpc/kernel/entry_32.S
> @@ -1185,6 +1185,8 @@ recheck:
>  	bne-	do_resched
>  	andi.	r0,r9,_TIF_USER_WORK_MASK
>  	beq	restore_user
> +	andis.	r0,r9,_TIF_DELAYED_KPROBE@h
> +	bne-	restore_kprobe

Same comment as earlier about name. Note that you're not hooking in the
right place. "recheck" is only reached if you -already- went out of the
normal exit path and only when going back to user space unless I'm
missing something (which is really the case you don't care about).

You need to hook into "resume_kernel" instead.

Also, we may want to simplify the whole thing, instead of checking user
vs. kernel first etc... we could instead have a single _TIF_WORK_MASK
which includes both the bits for user work and the new bit for kernel
work. With preempt, the kernel work bits would also include
_TIF_NEED_RESCHED.

Then you have in the common exit path, a single test for that, with a
fast path that skips everything and just goes to "restore" for both
kernel and user.

The only possible issue is the setting of dbcr0 for BookE and 44x and we
can keep that as a special case keyed of MSR_PR in the resume path under
ifdef BOOKE (we'll probably sanitize that later with some different
rework anyway). 

So the exit path becomes something like:

ret_from_except:
	.. hard disable interrupts (unchanged) ...
	read TIF flags
	andi with _TIF_WORK_MASK
		nothing set -> restore
	check PR
		set -> do_work_user
		no set -> do_work_kernel (kprobes & preempt)
		(both loop until relevant _TIF flags are all clear)
restore:
	#ifdef BOOKE & 44x test PR & do dbcr0 stuff if needed
	... normal restore ...

>  do_user_signal:			/* r10 contains MSR_KERNEL here */
>  	ori	r10,r10,MSR_EE
>  	SYNC
> @@ -1202,6 +1204,30 @@ do_user_signal:			/* r10 contains MSR_KERNEL here */
>  	REST_NVGPRS(r1)
>  	b	recheck
>  
> +restore_kprobe:
> +	lwz	r3,GPR1(r1)
> +	subi    r3,r3,INT_FRAME_SIZE; /* Allocate a trampoline exception frame */
> +	mr	r4,r1
> +	bl	copy_exc_stack	/* Copy from the original to the trampoline */
> +
> +	/* Do real stw operation to complete stwu */
> +	mr	r4,r1
> +	addi	r4,r4,INT_FRAME_SIZE	/* Get kprobed entry */
> +	lwz	r5,GPR1(r1)		/* Backup r1 */
> +	stw	r4,GPR1(r1)		/* Now store that safely */

The above confuses me. Shouldn't you do instead something like

	lwz	r4,GPR1(r1)
	subi	r3,r4,INT_FRAME_SIZE
	li	r5,INT_FRAME_SIZE
	bl	memcpy

To start with, then you need to know the "old" r1 value which may or may
not be related to your current r1. The emulation code should stash it
into the int frame in an unused slot such as "orig_gpr3" (since that
only pertains to restarting syscalls which we aren't doing here).

Then you adjust your r1 and do something like

	lwz	r3,GPR1(r1)
	lwz	r0,ORIG_GPR3(r1)
	stw	r0,0(r3)

To perform the store, before doing the rest:
 
> +	/* Reroute the trampoline frame to r1 */
> +	subi    r5,r5,INT_FRAME_SIZE
> +	mr	r1,r5
> +
> +	/* Clear _TIF_DELAYED_KPROBE flag */
> +	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
> +	lwz	r0,TI_FLAGS(r9)
> +	rlwinm	r0,r0,0,_TIF_DELAYED_KPROBE
> +	stw	r0,TI_FLAGS(r9)
> +
> +	b	restore
> +
>  /*
>   * We come here when we are at the end of handling an exception
>   * that occurred at a place where taking an exception will lose

Cheers,
Ben.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/4] ppc32/kprobe: complete kprobe and migrate exception frame
  2011-12-12 23:19   ` Benjamin Herrenschmidt
@ 2011-12-13  4:54     ` tiejun.chen
  2011-12-13  8:21       ` tiejun.chen
  2011-12-13 10:36       ` tiejun.chen
  0 siblings, 2 replies; 16+ messages in thread
From: tiejun.chen @ 2011-12-13  4:54 UTC (permalink / raw)
  To: Benjamin Herrenschmidt; +Cc: linuxppc-dev

Benjamin Herrenschmidt wrote:
> On Mon, 2011-12-12 at 16:50 +0800, Tiejun Chen wrote:
>> We can't emulate stwu since that may corrupt current exception stack.
>> So we will have to do real store operation in the exception return code.
>>
>> Firstly we'll allocate a trampoline exception frame below the kprobed
>> function stack and copy the current exception frame to the trampoline.
>> Then we can do this real store operation to implement 'stwu', and reroute
>> the trampoline frame to r1 to complete this exception migration.
>>
>> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
>> ---
>>  arch/powerpc/kernel/entry_32.S |   26 ++++++++++++++++++++++++++
>>  1 files changed, 26 insertions(+), 0 deletions(-)
>>
>> diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
>> index 56212bc..d56e311 100644
>> --- a/arch/powerpc/kernel/entry_32.S
>> +++ b/arch/powerpc/kernel/entry_32.S
>> @@ -1185,6 +1185,8 @@ recheck:
>>  	bne-	do_resched
>>  	andi.	r0,r9,_TIF_USER_WORK_MASK
>>  	beq	restore_user
>> +	andis.	r0,r9,_TIF_DELAYED_KPROBE@h
>> +	bne-	restore_kprobe
> 
> Same comment as earlier about name. Note that you're not hooking in the
> right place. "recheck" is only reached if you -already- went out of the
> normal exit path and only when going back to user space unless I'm
> missing something (which is really the case you don't care about).
> 
> You need to hook into "resume_kernel" instead.

Maybe I'm misunderstanding what you mean since, as I recall, your suggestion was
that we should do this at the end of do_work.

> 
> Also, we may want to simplify the whole thing, instead of checking user
> vs. kernel first etc... we could instead have a single _TIF_WORK_MASK
> which includes both the bits for user work and the new bit for kernel
> work. With preempt, the kernel work bits would also include
> _TIF_NEED_RESCHED.
> 
> Then you have in the common exit path, a single test for that, with a
> fast path that skips everything and just goes to "restore" for both
> kernel and user.
> 
> The only possible issue is the setting of dbcr0 for BookE and 44x and we
> can keep that as a special case keyed of MSR_PR in the resume path under
> ifdef BOOKE (we'll probably sanitize that later with some different
> rework anyway). 
> 
> So the exit path because something like:
> 
> ret_from_except:
> 	.. hard disable interrupts (unchanged) ...
> 	read TIF flags
> 	andi with _TIF_WORK_MASK
> 		nothing set -> restore
> 	check PR
> 		set -> do_work_user
> 		no set -> do_work_kernel (kprobes & preempt)
> 		(both loop until relevant _TIF flags are all clear)
> restore:
> 	#ifdef BOOKE & 44x test PR & do dbcr0 stuff if needed
> 	... nornal restore ...

Do you mean we should reorganize current ret_from_except for ppc32 as well?

> 
>>  do_user_signal:			/* r10 contains MSR_KERNEL here */
>>  	ori	r10,r10,MSR_EE
>>  	SYNC
>> @@ -1202,6 +1204,30 @@ do_user_signal:			/* r10 contains MSR_KERNEL here */
>>  	REST_NVGPRS(r1)
>>  	b	recheck
>>  
>> +restore_kprobe:
>> +	lwz	r3,GPR1(r1)
>> +	subi    r3,r3,INT_FRAME_SIZE; /* Allocate a trampoline exception frame */
>> +	mr	r4,r1
>> +	bl	copy_exc_stack	/* Copy from the original to the trampoline */
>> +
>> +	/* Do real stw operation to complete stwu */
>> +	mr	r4,r1
>> +	addi	r4,r4,INT_FRAME_SIZE	/* Get kprobed entry */
>> +	lwz	r5,GPR1(r1)		/* Backup r1 */
>> +	stw	r4,GPR1(r1)		/* Now store that safely */
> 
> The above confuses me. Shouldn't you do instead something like
> 
> 	lwz	r4,GPR1(r1)
> 	subi	r3,r4,INT_FRAME_SIZE
> 	li	r5,INT_FRAME_SIZE
> 	bl	memcpy
> 

Anyway I'll try this if you think memcpy is fine/safe in exception return codes.

> To start with, then you need to know the "old" r1 value which may or may
> not be related to your current r1. The emulation code should stash it

If the old r1 is not related to our current r1, it shouldn't be possible to reach
restore_kprobe since we set that new flag only for the current task.

If I'm wrong please correct me :)

Thanks
Tiejun

> into the int frame in an unused slot such as "orig_gpr3" (since that
> only pertains to restarting syscalls which we aren't doing here).
> 
> Then you adjust your r1 and do something like
> 
> 	lwz	r3,GPR1(r1)
> 	lwz	r0,ORIG_GPR3(r1)
> 	stw	r0,0(r3)
> 
> To perform the store, before doing the rest:
>  
>> +	/* Reroute the trampoline frame to r1 */
>> +	subi    r5,r5,INT_FRAME_SIZE
>> +	mr	r1,r5
>> +
>> +	/* Clear _TIF_DELAYED_KPROBE flag */
>> +	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
>> +	lwz	r0,TI_FLAGS(r9)
>> +	rlwinm	r0,r0,0,_TIF_DELAYED_KPROBE
>> +	stw	r0,TI_FLAGS(r9)
>> +
>> +	b	restore
>> +
>>  /*
>>   * We come here when we are at the end of handling an exception
>>   * that occurred at a place where taking an exception will lose

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/4] powerpc/kprobe: introduce a new thread flag
  2011-12-12 22:58   ` Benjamin Herrenschmidt
@ 2011-12-13  4:56     ` tiejun.chen
  0 siblings, 0 replies; 16+ messages in thread
From: tiejun.chen @ 2011-12-13  4:56 UTC (permalink / raw)
  To: Benjamin Herrenschmidt; +Cc: linuxppc-dev

Benjamin Herrenschmidt wrote:
> On Mon, 2011-12-12 at 16:50 +0800, Tiejun Chen wrote:
>> We need to add a new thread flag, TIF_KPROBE/_TIF_DELAYED_KPROBE,
>> for handling kprobe operation while exiting exception.
> 
> The basic idea is sane, however the instruction emulation isn't per-se
> kprobe specific. It could be used by xmon too for example. I'd rather
> use a different name, something like TIF_EMULATE_STACK_STORE or

Its good term so I'll use this directly :)

Thanks
Tiejun

> something like that.
> 
> Cheers,
> Ben.
> 
>> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
>> ---
>>  arch/powerpc/include/asm/thread_info.h |    2 ++
>>  1 files changed, 2 insertions(+), 0 deletions(-)
>>
>> diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
>> index 836f231..3378734 100644
>> --- a/arch/powerpc/include/asm/thread_info.h
>> +++ b/arch/powerpc/include/asm/thread_info.h
>> @@ -112,6 +112,7 @@ static inline struct thread_info *current_thread_info(void)
>>  #define TIF_FREEZE		14	/* Freezing for suspend */
>>  #define TIF_SYSCALL_TRACEPOINT	15	/* syscall tracepoint instrumentation */
>>  #define TIF_RUNLATCH		16	/* Is the runlatch enabled? */
>> +#define TIF_KPROBE		17	/* Is the delayed kprobe operation? */
>>  
>>  /* as above, but as bit values */
>>  #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
>> @@ -130,6 +131,7 @@ static inline struct thread_info *current_thread_info(void)
>>  #define _TIF_FREEZE		(1<<TIF_FREEZE)
>>  #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
>>  #define _TIF_RUNLATCH		(1<<TIF_RUNLATCH)
>> +#define _TIF_DELAYED_KPROBE	(1<<TIF_KPROBE)
>>  #define _TIF_SYSCALL_T_OR_A	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
>>  				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
>>  
> 
> 
> 

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 2/4] ppc32/kprobe: introduce copy_exc_stack
  2011-12-12 23:01   ` Benjamin Herrenschmidt
@ 2011-12-13  4:58     ` tiejun.chen
  0 siblings, 0 replies; 16+ messages in thread
From: tiejun.chen @ 2011-12-13  4:58 UTC (permalink / raw)
  To: Benjamin Herrenschmidt; +Cc: linuxppc-dev

Benjamin Herrenschmidt wrote:
> On Mon, 2011-12-12 at 16:50 +0800, Tiejun Chen wrote:
>> We need a copy mechanism to migrate exception stack. But looks copy_page()
>> already implement this well so we can complete copy_exc_stack() based on
>> that directly.
> 
> I'd rather you don't hijack copy_page which is quite sensitive. The
> emulation isn't performance critical so a "dumber" routine would work

Yes, I just thought we should keep good performance, so I 'stole' the original
copy_page().

> fine.
> 
> Why not use memcpy ? You can call it from assembly.

I'd like to switch to memcpy.

Thanks
Tiejun

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/4] ppc32/kprobe: complete kprobe and migrate exception frame
  2011-12-13  4:54     ` tiejun.chen
@ 2011-12-13  8:21       ` tiejun.chen
  2011-12-13 10:11         ` tiejun.chen
  2011-12-13 10:36       ` tiejun.chen
  1 sibling, 1 reply; 16+ messages in thread
From: tiejun.chen @ 2011-12-13  8:21 UTC (permalink / raw)
  To: Benjamin Herrenschmidt; +Cc: linuxppc-dev

>>
>> You need to hook into "resume_kernel" instead.
> 
> Maybe I'm misunderstanding what you mean since as I recall you suggestion we
> should do this at the end of do_work.
>

I regenerate this with hooking into resume_kernel in below.

>> Also, we may want to simplify the whole thing, instead of checking user
>> vs. kernel first etc... we could instead have a single _TIF_WORK_MASK
>> which includes both the bits for user work and the new bit for kernel
>> work. With preempt, the kernel work bits would also include
>> _TIF_NEED_RESCHED.
>>
>> Then you have in the common exit path, a single test for that, with a
>> fast path that skips everything and just goes to "restore" for both
>> kernel and user.
>>
>> The only possible issue is the setting of dbcr0 for BookE and 44x and we
>> can keep that as a special case keyed of MSR_PR in the resume path under
>> ifdef BOOKE (we'll probably sanitize that later with some different
>> rework anyway). 
>>
>> So the exit path because something like:
>>
>> ret_from_except:
>> 	.. hard disable interrupts (unchanged) ...
>> 	read TIF flags
>> 	andi with _TIF_WORK_MASK
>> 		nothing set -> restore
>> 	check PR
>> 		set -> do_work_user
>> 		no set -> do_work_kernel (kprobes & preempt)
>> 		(both loop until relevant _TIF flags are all clear)
>> restore:
>> 	#ifdef BOOKE & 44x test PR & do dbcr0 stuff if needed
>> 	... nornal restore ...
> 
> Do you mean we should reorganize current ret_from_except for ppc32 as well?

I assume it may not be necessary to reorganize ret_from_except for *ppc32*.

> 
>>>  do_user_signal:			/* r10 contains MSR_KERNEL here */
>>>  	ori	r10,r10,MSR_EE
>>>  	SYNC
>>> @@ -1202,6 +1204,30 @@ do_user_signal:			/* r10 contains MSR_KERNEL here */
>>>  	REST_NVGPRS(r1)
>>>  	b	recheck
>>>  
>>> +restore_kprobe:
>>> +	lwz	r3,GPR1(r1)
>>> +	subi    r3,r3,INT_FRAME_SIZE; /* Allocate a trampoline exception frame */
>>> +	mr	r4,r1
>>> +	bl	copy_exc_stack	/* Copy from the original to the trampoline */
>>> +
>>> +	/* Do real stw operation to complete stwu */
>>> +	mr	r4,r1
>>> +	addi	r4,r4,INT_FRAME_SIZE	/* Get kprobed entry */
>>> +	lwz	r5,GPR1(r1)		/* Backup r1 */
>>> +	stw	r4,GPR1(r1)		/* Now store that safely */
>> The above confuses me. Shouldn't you do instead something like
>>
>> 	lwz	r4,GPR1(r1)

Now GPR1(r1) is already pointed with new r1 in emulate_step().

>> 	subi	r3,r4,INT_FRAME_SIZE

Here we need this, 'mr r4,r1', since r1 holds current exception stack.

>> 	li	r5,INT_FRAME_SIZE
>> 	bl	memcpy

Then the current exception stack is migrated below the kprobed function stack.

stack flow:

--------------------------  -> old r1 when hit 'stwu r1, -AA(r1)' in our
        ^       ^           kprobed function entry.
        |       |
        |       |------------> AA allocated for the kprobed function
        |       |
        |       v
--------|-----------------  -> new r1, also GPR1(r1). It holds the kprobed
   ^    |                   function stack , -AA(r1).
   |    |
   |    |--------------------> INT_FRAME_SIZE for program exception
   |    |
   |    v
---|---------------------  -> r1 is updated to hold program exception stack.
   |
   |
   |------------------------> migrate the exception stack (r1) below the
   |                        kprobed function stack after memcpy of INT_FRAME_SIZE.
   v
-------------------------  -> reroute this as r1 for program exception stack.

>>
> 
> Anyway I'll try this if you think memcpy is fine/safe in exception return codes.
> 
>> To start with, then you need to know the "old" r1 value which may or may
>> not be related to your current r1. The emulation code should stash it
> 
> If the old r1 is not related to our current r1, it shouldn't be possible to go
> restore_kprob since we set that new flag only for the current.
		
If you agree with what I say above, please check the following:
======
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 56212bc..b6554c1 100644
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 56212bc..b6554c1 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -813,12 +813,40 @@ restore_user:

 #ifdef CONFIG_PREEMPT
        b       restore
+#endif

-/* N.B. the only way to get here is from the beq following ret_from_except. */
 resume_kernel:
        /* check current_thread_info->preempt_count */
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r0,TI_PREEMPT(r9)
+       andis.  r0,r0,_TIF_EMULATE_STACK_STORE@h
+       beq+    restore
+
+       lwz     r3,GPR1(r1)
+       subi    r3,r3,INT_FRAME_SIZE    /* Allocate a trampoline exception frame */
+       mr      r4,r1
+       li      r5,INT_FRAME_SIZE
+       bl      memcpy                  /* Copy from the original to the
trampoline */
+
+       /* Do real store operation to complete stwu */
+       addi    r4,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */
+       lwz     r5,GPR1(r1)
+       stw     r4,0(r5)                /* Now store that safely */
+
+       /* Reroute the trampoline frame to r1 */
+       subi    r1,r5,INT_FRAME_SIZE
+
+       /* Clear _TIF_EMULATE_STACK_STORE flag */
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
+       lwz     r0,TI_FLAGS(r9)
+       rlwinm  r0,r0,0,_TIF_EMULATE_STACK_STORE
+       stw     r0,TI_FLAGS(r9)
+
+#ifdef CONFIG_PREEMPT
+/* N.B. the only way to get here is from the beq following ret_from_except. */
+       /* check current_thread_info->preempt_count */
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
+       lwz     r0,TI_PREEMPT(r9)
        cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
        bne     restore
        lwz     r0,TI_FLAGS(r9)
@@ -844,8 +872,6 @@ resume_kernel:
         */
        bl      trace_hardirqs_on
 #endif
-#else
-resume_kernel:
 #endif /* CONFIG_PREEMPT */

        /* interrupts are hard-disabled at this point */

Tiejun

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/4] ppc32/kprobe: complete kprobe and migrate exception frame
  2011-12-13  8:21       ` tiejun.chen
@ 2011-12-13 10:11         ` tiejun.chen
  0 siblings, 0 replies; 16+ messages in thread
From: tiejun.chen @ 2011-12-13 10:11 UTC (permalink / raw)
  To: Benjamin Herrenschmidt; +Cc: linuxppc-dev

Sorry please ignore this email since I'm missing something here :(

Tiejun

tiejun.chen wrote:
>>> You need to hook into "resume_kernel" instead.
>> Maybe I'm misunderstanding what you mean since as I recall you suggestion we
>> should do this at the end of do_work.
>>
> 
> I regenerate this with hooking into resume_kernel in below.
> 
>>> Also, we may want to simplify the whole thing, instead of checking user
>>> vs. kernel first etc... we could instead have a single _TIF_WORK_MASK
>>> which includes both the bits for user work and the new bit for kernel
>>> work. With preempt, the kernel work bits would also include
>>> _TIF_NEED_RESCHED.
>>>
>>> Then you have in the common exit path, a single test for that, with a
>>> fast path that skips everything and just goes to "restore" for both
>>> kernel and user.
>>>
>>> The only possible issue is the setting of dbcr0 for BookE and 44x and we
>>> can keep that as a special case keyed off MSR_PR in the resume path under
>>> ifdef BOOKE (we'll probably sanitize that later with some different
>>> rework anyway). 
>>>
>>> So the exit path becomes something like:
>>>
>>> ret_from_except:
>>> 	.. hard disable interrupts (unchanged) ...
>>> 	read TIF flags
>>> 	andi with _TIF_WORK_MASK
>>> 		nothing set -> restore
>>> 	check PR
>>> 		set -> do_work_user
>>> 		no set -> do_work_kernel (kprobes & preempt)
>>> 		(both loop until relevant _TIF flags are all clear)
>>> restore:
>>> 	#ifdef BOOKE & 44x test PR & do dbcr0 stuff if needed
>>> 	... normal restore ...
>> Do you mean we should reorganize current ret_from_except for ppc32 as well?
> 
> I assume it may not necessary to reorganize ret_from_except for *ppc32*.
> 
>>>>  do_user_signal:			/* r10 contains MSR_KERNEL here */
>>>>  	ori	r10,r10,MSR_EE
>>>>  	SYNC
>>>> @@ -1202,6 +1204,30 @@ do_user_signal:			/* r10 contains MSR_KERNEL here */
>>>>  	REST_NVGPRS(r1)
>>>>  	b	recheck
>>>>  
>>>> +restore_kprobe:
>>>> +	lwz	r3,GPR1(r1)
>>>> +	subi    r3,r3,INT_FRAME_SIZE; /* Allocate a trampoline exception frame */
>>>> +	mr	r4,r1
>>>> +	bl	copy_exc_stack	/* Copy from the original to the trampoline */
>>>> +
>>>> +	/* Do real stw operation to complete stwu */
>>>> +	mr	r4,r1
>>>> +	addi	r4,r4,INT_FRAME_SIZE	/* Get kprobed entry */
>>>> +	lwz	r5,GPR1(r1)		/* Backup r1 */
>>>> +	stw	r4,GPR1(r1)		/* Now store that safely */
>>> The above confuses me. Shouldn't you do instead something like
>>>
>>> 	lwz	r4,GPR1(r1)
> 
> Now GPR1(r1) is already pointed with new r1 in emulate_step().
> 
>>> 	subi	r3,r4,INT_FRAME_SIZE
> 
> Here we need this, 'mr r4,r1', since r1 holds current exception stack.
> 
>>> 	li	r5,INT_FRAME_SIZE
>>> 	bl	memcpy
> 
> Then the current exception stack is migrated below the kprobed function stack.
> 
> stack flow:
> 
> --------------------------  -> old r1 when hit 'stwu r1, -AA(r1)' in our
>         ^       ^           kprobed function entry.
>         |       |
>         |       |------------> AA allocated for the kprobed function
>         |       |
>         |       v
> --------|-----------------  -> new r1, also GPR1(r1). It holds the kprobed
>    ^    |                   function stack , -AA(r1).
>    |    |
>    |    |--------------------> INT_FRAME_SIZE for program exception
>    |    |
>    |    v
> ---|---------------------  -> r1 is updated to hold program exception stack.
>    |
>    |
>    |------------------------> migrate the exception stack (r1) below the
>    |                        kprobed after memcpy with INT_FRAME_SIZE.
>    v
> -------------------------  -> reroute this as r1 for program exception stack.
> 
>> Anyway I'll try this if you think memcpy is fine/safe in exception return codes.
>>
>>> To start with, then you need to know the "old" r1 value which may or may
>>> not be related to your current r1. The emulation code should stash it
>> If the old r1 is not related to our current r1, it shouldn't be possible to go
>> restore_kprob since we set that new flag only for the current.
> 		
> If you agree what I say above, please check the follow:
> ======
> diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
> index 56212bc..b6554c1 100644
> diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
> index 56212bc..b6554c1 100644
> --- a/arch/powerpc/kernel/entry_32.S
> +++ b/arch/powerpc/kernel/entry_32.S
> @@ -813,12 +813,40 @@ restore_user:
> 
>  #ifdef CONFIG_PREEMPT
>         b       restore
> +#endif
> 
> -/* N.B. the only way to get here is from the beq following ret_from_except. */
>  resume_kernel:
>         /* check current_thread_info->preempt_count */
>         rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
>         lwz     r0,TI_PREEMPT(r9)
> +       andis.  r0,r0,_TIF_EMULATE_STACK_STORE@h
> +       beq+    restore
> +
> +       lwz     r3,GPR1(r1)
> +       subi    r3,r3,INT_FRAME_SIZE    /* Allocate a trampoline exception frame */
> +       mr      r4,r1
> +       li      r5,INT_FRAME_SIZE
> +       bl      memcpy                  /* Copy from the original to the
> trampoline */
> +
> +       /* Do real store operation to complete stwu */
> +       addi    r4,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */
> +       lwz     r5,GPR1(r1)
> +       stw     r4,0(r5)                /* Now store that safely */
> +
> +       /* Reroute the trampoline frame to r1 */
> +       subi    r1,r5,INT_FRAME_SIZE
> +
> +       /* Clear _TIF_EMULATE_STACK_STORE flag */
> +       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
> +       lwz     r0,TI_FLAGS(r9)
> +       rlwinm  r0,r0,0,_TIF_EMULATE_STACK_STORE
> +       stw     r0,TI_FLAGS(r9)
> +
> +#ifdef CONFIG_PREEMPT
> +/* N.B. the only way to get here is from the beq following ret_from_except. */
> +       /* check current_thread_info->preempt_count */
> +       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
> +       lwz     r0,TI_PREEMPT(r9)
>         cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
>         bne     restore
>         lwz     r0,TI_FLAGS(r9)
> @@ -844,8 +872,6 @@ resume_kernel:
>          */
>         bl      trace_hardirqs_on
>  #endif
> -#else
> -resume_kernel:
>  #endif /* CONFIG_PREEMPT */
> 
>         /* interrupts are hard-disabled at this point */
> 
> Tiejun
> _______________________________________________
> Linuxppc-dev mailing list
> Linuxppc-dev@lists.ozlabs.org
> https://lists.ozlabs.org/listinfo/linuxppc-dev
> 

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/4] ppc32/kprobe: complete kprobe and migrate exception frame
  2011-12-13  4:54     ` tiejun.chen
  2011-12-13  8:21       ` tiejun.chen
@ 2011-12-13 10:36       ` tiejun.chen
  2011-12-15  0:37         ` Benjamin Herrenschmidt
  1 sibling, 1 reply; 16+ messages in thread
From: tiejun.chen @ 2011-12-13 10:36 UTC (permalink / raw)
  To: Benjamin Herrenschmidt; +Cc: linuxppc-dev

>> You need to hook into "resume_kernel" instead.
>

I regenerate this with hooking into resume_kernel in below.

> Maybe I'm misunderstanding what you mean since as I recall you suggestion we
> should do this at the end of do_work.
> 
>> Also, we may want to simplify the whole thing, instead of checking user
>> vs. kernel first etc... we could instead have a single _TIF_WORK_MASK
>> which includes both the bits for user work and the new bit for kernel
>> work. With preempt, the kernel work bits would also include
>> _TIF_NEED_RESCHED.
>>
>> Then you have in the common exit path, a single test for that, with a
>> fast path that skips everything and just goes to "restore" for both
>> kernel and user.
>>
>> The only possible issue is the setting of dbcr0 for BookE and 44x and we
>> can keep that as a special case keyed off MSR_PR in the resume path under
>> ifdef BOOKE (we'll probably sanitize that later with some different
>> rework anyway). 
>>
>> So the exit path becomes something like:
>>
>> ret_from_except:
>> 	.. hard disable interrupts (unchanged) ...
>> 	read TIF flags
>> 	andi with _TIF_WORK_MASK
>> 		nothing set -> restore
>> 	check PR
>> 		set -> do_work_user
>> 		no set -> do_work_kernel (kprobes & preempt)
>> 		(both loop until relevant _TIF flags are all clear)
>> restore:
>> 	#ifdef BOOKE & 44x test PR & do dbcr0 stuff if needed
>> 	... normal restore ...
> 
> Do you mean we should reorganize current ret_from_except for ppc32 as well?
> 

I assume it may not necessary to reorganize ret_from_except for *ppc32* .

>>>  do_user_signal:			/* r10 contains MSR_KERNEL here */
>>>  	ori	r10,r10,MSR_EE
>>>  	SYNC
>>> @@ -1202,6 +1204,30 @@ do_user_signal:			/* r10 contains MSR_KERNEL here */
>>>  	REST_NVGPRS(r1)
>>>  	b	recheck
>>>  
>>> +restore_kprobe:
>>> +	lwz	r3,GPR1(r1)
>>> +	subi    r3,r3,INT_FRAME_SIZE; /* Allocate a trampoline exception frame */
>>> +	mr	r4,r1
>>> +	bl	copy_exc_stack	/* Copy from the original to the trampoline */
>>> +
>>> +	/* Do real stw operation to complete stwu */
>>> +	mr	r4,r1
>>> +	addi	r4,r4,INT_FRAME_SIZE	/* Get kprobed entry */
>>> +	lwz	r5,GPR1(r1)		/* Backup r1 */
>>> +	stw	r4,GPR1(r1)		/* Now store that safely */
>> The above confuses me. Shouldn't you do instead something like
>>
>> 	lwz	r4,GPR1(r1)

Now GPR1(r1) is already pointed with new r1 in emulate_step().

>> 	subi	r3,r4,INT_FRAME_SIZE

Here we need this, 'mr r4,r1', since r1 holds current exception stack.

>> 	li	r5,INT_FRAME_SIZE
>> 	bl	memcpy

Then the current exception stack is migrated below the kprobed function stack.

stack flow:

--------------------------  -> old r1 when hit 'stwu r1, -AA(r1)' in our
        ^       ^           kprobed function entry.
        |       |
        |       |------------> AA allocated for the kprobed function
        |       |
        |       v
--------|-----------------  -> new r1, also GPR1(r1). It holds the kprobed
   ^    |                   function stack , -AA(r1).
   |    |
   |    |--------------------> INT_FRAME_SIZE for program exception
   |    |
   |    v
---|---------------------  -> r1 is updated to hold program exception stack.
   |
   |
   |------------------------> migrate the exception stack (r1) below the
   |                        kprobed after memcpy with INT_FRAME_SIZE.
   v
-------------------------  -> reroute this as r1 for program exception stack.

>>
> 
> Anyway I'll try this if you think memcpy is fine/safe in exception return codes.
> 
>> To start with, then you need to know the "old" r1 value which may or may
>> not be related to your current r1. The emulation code should stash it
> 
> If the old r1 is not related to our current r1, it shouldn't be possible to go
> restore_kprob since we set that new flag only for the current.
> 
> If I'm wrong please correct me :)

If you agree what I say above, and its also avoid any issue introduced with
orig_gpr3, please check the follow:
=========
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 56212bc..277029d 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -813,9 +813,40 @@ restore_user:

 #ifdef CONFIG_PREEMPT
        b       restore
+#endif

-/* N.B. the only way to get here is from the beq following ret_from_except. */
 resume_kernel:
+#ifdef CONFIG_KPROBES
+       /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
+       lwz     r0,TI_FLAGS(r9)
+       andis.  r0,r0,_TIF_EMULATE_STACK_STORE@h
+       beq+    restore_kernel
+
+       addi    r9,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */
+
+       lwz     r3,GPR1(r1)
+       subi    r3,r3,INT_FRAME_SIZE    /* dst: Allocate a trampoline exception
frame */
+       mr      r4,r1                   /* src:  current exception frame */
+       li      r5,INT_FRAME_SIZE       /* size: INT_FRAME_SIZE */
+       mr      r1,r3                   /* Reroute the trampoline frame to r1 */
+       bl      memcpy                  /* Copy from the original to the
trampoline */
+
+       /* Do real store operation to complete stwu */
+       lwz     r5,GPR1(r1)
+       stw     r9,0(r5)
+
+       /* Clear _TIF_EMULATE_STACK_STORE flag */
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
+       lwz     r0,TI_FLAGS(r9)
+       rlwinm  r0,r0,0,_TIF_EMULATE_STACK_STORE
+       stw     r0,TI_FLAGS(r9)
+
+restore_kernel:
+#endif
+
+#ifdef CONFIG_PREEMPT
+/* N.B. the only way to get here is from the beq following ret_from_except. */
        /* check current_thread_info->preempt_count */
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r0,TI_PREEMPT(r9)
@@ -844,8 +875,6 @@ resume_kernel:
         */
        bl      trace_hardirqs_on
 #endif
-#else
-resume_kernel:
 #endif /* CONFIG_PREEMPT */

        /* interrupts are hard-disabled at this point */

Tiejun

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/4] ppc32/kprobe: complete kprobe and migrate exception frame
  2011-12-13 10:36       ` tiejun.chen
@ 2011-12-15  0:37         ` Benjamin Herrenschmidt
  2011-12-15 11:19           ` tiejun.chen
  0 siblings, 1 reply; 16+ messages in thread
From: Benjamin Herrenschmidt @ 2011-12-15  0:37 UTC (permalink / raw)
  To: tiejun.chen; +Cc: linuxppc-dev

On Tue, 2011-12-13 at 18:36 +0800, tiejun.chen wrote:
> >> You need to hook into "resume_kernel" instead.
> >
> 
> I regenerate this with hooking into resume_kernel in below.

 .../...

> I assume it may not necessary to reorganize ret_from_except for *ppc32* .

It might be cleaner but I can do that myself later.

> >>>  do_user_signal:			/* r10 contains MSR_KERNEL here */
> >>>  	ori	r10,r10,MSR_EE
> >>>  	SYNC
> >>> @@ -1202,6 +1204,30 @@ do_user_signal:			/* r10 contains MSR_KERNEL here */
> >>>  	REST_NVGPRS(r1)
> >>>  	b	recheck
> >>>  
> >>> +restore_kprobe:
> >>> +	lwz	r3,GPR1(r1)
> >>> +	subi    r3,r3,INT_FRAME_SIZE; /* Allocate a trampoline exception frame */
> >>> +	mr	r4,r1
> >>> +	bl	copy_exc_stack	/* Copy from the original to the trampoline */
> >>> +
> >>> +	/* Do real stw operation to complete stwu */
> >>> +	mr	r4,r1
> >>> +	addi	r4,r4,INT_FRAME_SIZE	/* Get kprobed entry */
> >>> +	lwz	r5,GPR1(r1)		/* Backup r1 */
> >>> +	stw	r4,GPR1(r1)		/* Now store that safely */
> >> The above confuses me. Shouldn't you do instead something like
> >>
> >> 	lwz	r4,GPR1(r1)
> 
> Now GPR1(r1) is already pointed with new r1 in emulate_step().

Right

> >> 	subi	r3,r4,INT_FRAME_SIZE
> 
> Here we need this, 'mr r4,r1', since r1 holds current exception stack.

Right.

> >> 	li	r5,INT_FRAME_SIZE
> >> 	bl	memcpy
> 
> Then the current exception stack is migrated below the kprobed function stack.
> 
> stack flow:
> 
> --------------------------  -> old r1 when hit 'stwu r1, -AA(r1)' in our
>         ^       ^           kprobed function entry.
>         |       |
>         |       |------------> AA allocated for the kprobed function
>         |       |
>         |       v
> --------|-----------------  -> new r1, also GPR1(r1). It holds the kprobed
>    ^    |                   function stack , -AA(r1).
>    |    |
>    |    |--------------------> INT_FRAME_SIZE for program exception
>    |    |
>    |    v
> ---|---------------------  -> r1 is updated to hold program exception stack.
>    |
>    |
>    |------------------------> migrate the exception stack (r1) below the
>    |                        kprobed after memcpy with INT_FRAME_SIZE.
>    v
> -------------------------  -> reroute this as r1 for program exception stack.

I see so you simply assume that the old r1 value is the current r1 +
INT_FRAME_SIZE, which is probably fair enough.

BTW. we should probably WARN_ON if emulate_step tries to set the new TIF
flag and sees it already set since that means we'll lose the previous
value.

> >>
> > 
> > Anyway I'll try this if you think memcpy is fine/safe in exception return codes.
> > 
> >> To start with, then you need to know the "old" r1 value which may or may
> >> not be related to your current r1. The emulation code should stash it
> > 
> > If the old r1 is not related to our current r1, it shouldn't be possible to go
> > restore_kprob since we set that new flag only for the current.
> > 
> > If I'm wrong please correct me :)
> 
> If you agree what I say above, and its also avoid any issue introduced with
> orig_gpr3, please check the follow:
> =========
> diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
> index 56212bc..277029d 100644
> --- a/arch/powerpc/kernel/entry_32.S
> +++ b/arch/powerpc/kernel/entry_32.S
> @@ -813,9 +813,40 @@ restore_user:
> 
>  #ifdef CONFIG_PREEMPT
>         b       restore
> +#endif

The above means that if !PREEMPT, a userspace return -will- go into
your new code, while with PREEMPT it won't. This is inconsistent. Now
we should never need that for userspace returns (and indeed you should
double check in emulate step that you are only applying this when
regs->msr & MSR_PR is 0). The above branch should basically become
unconditional.

> -/* N.B. the only way to get here is from the beq following ret_from_except. */
>  resume_kernel:
> +#ifdef CONFIG_KPROBES

Don't make this KPROBES specific. Anything using emulate_step (such as
xmon) might need that too.

> +       /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
> +       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
> +       lwz     r0,TI_FLAGS(r9)
> +       andis.  r0,r0,_TIF_EMULATE_STACK_STORE@h
> +       beq+    restore_kernel

So you are introducing a new symbol restore_kernel, you could just
branch to "restore". However, that would mean putting the preempt
case before the kprobe case. But don't we want to do that anyway ?

I don't like keeping that "offsetted" return stack accross a preempt.

> +       addi    r9,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */
> +
> +       lwz     r3,GPR1(r1)
> +       subi    r3,r3,INT_FRAME_SIZE    /* dst: Allocate a trampoline exception
> frame */
> +       mr      r4,r1                   /* src:  current exception frame */
> +       li      r5,INT_FRAME_SIZE       /* size: INT_FRAME_SIZE */
> +       mr      r1,r3                   /* Reroute the trampoline frame to r1 */
> +       bl      memcpy                  /* Copy from the original to the
> trampoline */
> +
> +       /* Do real store operation to complete stwu */
> +       lwz     r5,GPR1(r1)
> +       stw     r9,0(r5)
>
> +       /* Clear _TIF_EMULATE_STACK_STORE flag */
> +       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
> +       lwz     r0,TI_FLAGS(r9)
> +       rlwinm  r0,r0,0,_TIF_EMULATE_STACK_STORE
> +       stw     r0,TI_FLAGS(r9)

I think this needs to be an atomic operation, another CPU can be trying
to set _NEED_RESCHED at the same time.

> +restore_kernel:
> +#endif
> +
> +#ifdef CONFIG_PREEMPT
> +/* N.B. the only way to get here is from the beq following ret_from_except. */
>         /* check current_thread_info->preempt_count */
>         rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
>         lwz     r0,TI_PREEMPT(r9)
> @@ -844,8 +875,6 @@ resume_kernel:
>          */
>         bl      trace_hardirqs_on
>  #endif
> -#else
> -resume_kernel:
>  #endif /* CONFIG_PREEMPT */
> 
>         /* interrupts are hard-disabled at this point */
> 
> Tiejun

Cheers,
Ben.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/4] ppc32/kprobe: complete kprobe and migrate exception frame
  2011-12-15  0:37         ` Benjamin Herrenschmidt
@ 2011-12-15 11:19           ` tiejun.chen
  0 siblings, 0 replies; 16+ messages in thread
From: tiejun.chen @ 2011-12-15 11:19 UTC (permalink / raw)
  To: Benjamin Herrenschmidt; +Cc: linuxppc-dev

Looks like we have to go into 'restore' in the end, as I said previously. I'll send v2 based
on your all comments.

>> I assume it may not necessary to reorganize ret_from_except for *ppc32* .
> 
> It might be cleaner but I can do that myself later.
> 

I have this version but I'm not 100% sure if its as you expect :)

#define _TIF_WORK_MASK (_TIF_USER_WORK_MASK | _TIF_EMULATE_STACK_STORE)

======
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 56212bc..e52b586 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -791,41 +791,29 @@ ret_from_except:
        SYNC                    /* Some chip revs have problems here... */
        MTMSRD(r10)             /* disable interrupts */

-       lwz     r3,_MSR(r1)     /* Returning to user mode? */
-       andi.   r0,r3,MSR_PR
-       beq     resume_kernel
-
 user_exc_return:               /* r10 contains MSR_KERNEL here */
        /* Check current_thread_info()->flags */
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r9,TI_FLAGS(r9)
-       andi.   r0,r9,_TIF_USER_WORK_MASK
-       bne     do_work
+       andi.   r0,r9,_TIF_WORK_MASK
+       beq     restore

-restore_user:
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-       /* Check whether this process has its own DBCR0 value.  The internal
-          debug mode bit tells us that dbcr0 should be loaded. */
-       lwz     r0,THREAD+THREAD_DBCR0(r2)
-       andis.  r10,r0,DBCR0_IDM@h
-       bnel-   load_dbcr0
-#endif
+       lwz     r3,_MSR(r1)     /* Returning to user mode? */
+       andi.   r0,r3,MSR_PR
+       bne     do_user_work

 #ifdef CONFIG_PREEMPT
-       b       restore
-
 /* N.B. the only way to get here is from the beq following ret_from_except. */
-resume_kernel:
        /* check current_thread_info->preempt_count */
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r0,TI_PREEMPT(r9)
        cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
-       bne     restore
+       bne     2f
        lwz     r0,TI_FLAGS(r9)
        andi.   r0,r0,_TIF_NEED_RESCHED
-       beq+    restore
+       beq+    2f
        andi.   r0,r3,MSR_EE    /* interrupts off? */
-       beq     restore         /* don't schedule if so */
+       beq     2f      /* don't schedule if so */
 #ifdef CONFIG_TRACE_IRQFLAGS
        /* Lockdep thinks irqs are enabled, we need to call
         * preempt_schedule_irq with IRQs off, so we inform lockdep
@@ -844,12 +832,54 @@ resume_kernel:
         */
        bl      trace_hardirqs_on
 #endif
-#else
-resume_kernel:
+2:
 #endif /* CONFIG_PREEMPT */

+       /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
+       lwz     r0,TI_FLAGS(r9)
+       andis.  r0,r0,_TIF_EMULATE_STACK_STORE@h
+       beq+    restore
+
+       addi    r9,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */
+
+       lwz     r3,GPR1(r1)
+       subi    r3,r3,INT_FRAME_SIZE    /* dst: Allocate a trampoline exception
frame */
+       mr      r4,r1                   /* src:  current exception frame */
+       li      r5,INT_FRAME_SIZE       /* size: INT_FRAME_SIZE */
+       mr      r1,r3                   /* Reroute the trampoline frame to r1 */
+       bl      memcpy                  /* Copy from the original to the
trampoline */
+
+       /* Do real store operation to complete stwu */
+       lwz     r5,GPR1(r1)
+       stw     r9,0(r5)
+
+
+       /* Clear _TIF_EMULATE_STACK_STORE flag */
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
+       lis     r11,_TIF_EMULATE_STACK_STORE@h
+       addi    r9,r9,TI_FLAGS
+0:     lwarx   r8,0,r9
+       andc    r8,r8,r11
+#ifdef CONFIG_IBM405_ERR77
+       dcbt    0,r9
+#endif
+       stwcx.  r8,0,r9
+       bne-    0b
+
        /* interrupts are hard-disabled at this point */
 restore:
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+       lwz     r3,_MSR(r1)     /* Returning to user mode? */
+       andi.   r0,r3,MSR_PR
+       beq     1f
+       /* Check whether this process has its own DBCR0 value.  The internal
+          debug mode bit tells us that dbcr0 should be loaded. */
+       lwz     r0,THREAD+THREAD_DBCR0(r2)
+       andis.  r10,r0,DBCR0_IDM@h
+       bnel-   load_dbcr0
+1:
+#endif
+
 #ifdef CONFIG_44x
 BEGIN_MMU_FTR_SECTION
        b       1f
@@ -1159,7 +1189,7 @@ global_dbcr0:
        .previous
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

-do_work:                       /* r10 contains MSR_KERNEL here */
+do_user_work:                  /* r10 contains MSR_KERNEL here */
        andi.   r0,r9,_TIF_NEED_RESCHED
        beq     do_user_signal

@@ -1184,7 +1214,7 @@ recheck:
        andi.   r0,r9,_TIF_NEED_RESCHED
        bne-    do_resched
        andi.   r0,r9,_TIF_USER_WORK_MASK
-       beq     restore_user
+       beq     restore
 do_user_signal:                        /* r10 contains MSR_KERNEL here */
        ori     r10,r10,MSR_EE
        SYNC

Tiejun

Thanks
Tiejun

^ permalink raw reply related	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2011-12-15 11:20 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-12-12  8:50 ppc32/kprobe: Fix a bug for kprobe stwu r1 Tiejun Chen
2011-12-12  8:50 ` [PATCH 1/4] powerpc/kprobe: introduce a new thread flag Tiejun Chen
2011-12-12 22:58   ` Benjamin Herrenschmidt
2011-12-13  4:56     ` tiejun.chen
2011-12-12  8:50 ` [PATCH 2/4] ppc32/kprobe: introduce copy_exc_stack Tiejun Chen
2011-12-12 23:01   ` Benjamin Herrenschmidt
2011-12-13  4:58     ` tiejun.chen
2011-12-12  8:50 ` [PATCH 3/4] ppc32/kprobe: complete kprobe and migrate exception frame Tiejun Chen
2011-12-12 23:19   ` Benjamin Herrenschmidt
2011-12-13  4:54     ` tiejun.chen
2011-12-13  8:21       ` tiejun.chen
2011-12-13 10:11         ` tiejun.chen
2011-12-13 10:36       ` tiejun.chen
2011-12-15  0:37         ` Benjamin Herrenschmidt
2011-12-15 11:19           ` tiejun.chen
2011-12-12  8:50 ` [PATCH 4/4] ppc32/kprobe: don't emulate store when kprobe stwu r1 Tiejun Chen

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).