* [v5][PATCH 1/3] powerpc/kprobe: introduce a new thread flag
@ 2012-09-17 9:54 Tiejun Chen
2012-09-17 9:54 ` [v5][PATCH 2/3] powerpc/kprobe: complete kprobe and migrate exception frame Tiejun Chen
2012-09-17 9:54 ` [v5][PATCH 3/3] powerpc/kprobe: don't emulate store when kprobe stwu r1 Tiejun Chen
0 siblings, 2 replies; 8+ messages in thread
From: Tiejun Chen @ 2012-09-17 9:54 UTC (permalink / raw)
To: benh; +Cc: linuxppc-dev
We need to add a new thread flag, TIF_EMULATE_STACK_STORE,
for emulating a stack store operation while exiting an exception.
Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
arch/powerpc/include/asm/thread_info.h | 3 +++
1 file changed, 3 insertions(+)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index e942203..8ceea14 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -104,6 +104,8 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
#define TIF_UPROBE 14 /* breakpointed or single-stepping */
#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
+#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
+ for stack store? */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -121,6 +123,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_UPROBE (1<<TIF_UPROBE)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
--
1.7.9.5
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [v5][PATCH 2/3] powerpc/kprobe: complete kprobe and migrate exception frame
2012-09-17 9:54 [v5][PATCH 1/3] powerpc/kprobe: introduce a new thread flag Tiejun Chen
@ 2012-09-17 9:54 ` Tiejun Chen
2012-09-17 10:02 ` David Laight
2012-09-18 5:05 ` Benjamin Herrenschmidt
2012-09-17 9:54 ` [v5][PATCH 3/3] powerpc/kprobe: don't emulate store when kprobe stwu r1 Tiejun Chen
1 sibling, 2 replies; 8+ messages in thread
From: Tiejun Chen @ 2012-09-17 9:54 UTC (permalink / raw)
To: benh; +Cc: linuxppc-dev
We can't emulate stwu since that may corrupt the current exception stack.
So we will have to do the real store operation in the exception return code.
Firstly we'll allocate a trampoline exception frame below the kprobed
function stack and copy the current exception frame to the trampoline.
Then we can do this real store operation to implement 'stwu', and reroute
the trampoline frame to r1 to complete this exception migration.
Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
v5:
* Simplify copy operation
arch/powerpc/kernel/entry_32.S | 49 +++++++++++++++++++++++++++++++++++-----
arch/powerpc/kernel/entry_64.S | 37 ++++++++++++++++++++++++++++++
2 files changed, 80 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index ead5016..d27fe36 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -831,19 +831,58 @@ restore_user:
bnel- load_dbcr0
#endif
-#ifdef CONFIG_PREEMPT
b restore
/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
- /* check current_thread_info->preempt_count */
+ /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
CURRENT_THREAD_INFO(r9, r1)
+ lwz r8,TI_FLAGS(r9)
+ andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
+ beq+ 1f
+
+ addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
+
+ lwz r3,GPR1(r1)
+ subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
+ mr r4,r1 /* src: current exception frame */
+ li r5,INT_FRAME_SIZE /* size: INT_FRAME_SIZE */
+ li r6,0 /* start offset: 0 */
+ mr r1,r3 /* Reroute the trampoline frame to r1 */
+
+ /* Copy from the original to the trampoline. */
+ li r6,0
+ srwi r5,r5,2
+ mtctr r5
+2: lwzx r0,r6,r4
+ stwx r0,r6,r3
+ addi r6,r6,4
+ bdnz 2b
+
+ /* Do real store operation to complete stwu */
+ lwz r5,GPR1(r1)
+ stw r8,0(r5)
+
+ /* Clear _TIF_EMULATE_STACK_STORE flag */
+ lis r11,_TIF_EMULATE_STACK_STORE@h
+ addi r5,r9,TI_FLAGS
+0: lwarx r8,0,r5
+ andc r8,r8,r11
+#ifdef CONFIG_IBM405_ERR77
+ dcbt 0,r5
+#endif
+ stwcx. r8,0,r5
+ bne- 0b
+1:
+
+#ifdef CONFIG_PREEMPT
+ /* check current_thread_info->preempt_count */
lwz r0,TI_PREEMPT(r9)
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
- lwz r0,TI_FLAGS(r9)
- andi. r0,r0,_TIF_NEED_RESCHED
+ andi. r8,r8,_TIF_NEED_RESCHED
beq+ restore
+ lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -864,8 +903,6 @@ resume_kernel:
*/
bl trace_hardirqs_on
#endif
-#else
-resume_kernel:
#endif /* CONFIG_PREEMPT */
/* interrupts are hard-disabled at this point */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index b40e0b4..bdd2dc1 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -593,6 +593,43 @@ _GLOBAL(ret_from_except_lite)
b .ret_from_except
resume_kernel:
+ /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
+ CURRENT_THREAD_INFO(r9, r1)
+ ld r8,TI_FLAGS(r9)
+ andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
+ beq+ 1f
+
+ addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
+
+ lwz r3,GPR1(r1)
+ subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
+ mr r4,r1 /* src: current exception frame */
+ li r5,INT_FRAME_SIZE /* size: INT_FRAME_SIZE */
+ li r6,0 /* start offset: 0 */
+ mr r1,r3 /* Reroute the trampoline frame to r1 */
+
+ /* Copy from the original to the trampoline. */
+ li r6,0
+ srwi r5,r5,3
+ mtctr r5
+2: ldx r0,r6,r4
+ stdx r0,r6,r3
+ addi r6,r6,8
+ bdnz 2b
+
+ /* Do real store operation to complete stwu */
+ lwz r5,GPR1(r1)
+ std r8,0(r5)
+
+ /* Clear _TIF_EMULATE_STACK_STORE flag */
+ lis r11,_TIF_EMULATE_STACK_STORE@h
+ addi r5,r9,TI_FLAGS
+ ldarx r4,0,r5
+ andc r4,r4,r11
+ stdcx. r4,0,r5
+ bne- 0b
+1:
+
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
andi. r0,r4,_TIF_NEED_RESCHED
--
1.7.9.5
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [v5][PATCH 3/3] powerpc/kprobe: don't emulate store when kprobe stwu r1
2012-09-17 9:54 [v5][PATCH 1/3] powerpc/kprobe: introduce a new thread flag Tiejun Chen
2012-09-17 9:54 ` [v5][PATCH 2/3] powerpc/kprobe: complete kprobe and migrate exception frame Tiejun Chen
@ 2012-09-17 9:54 ` Tiejun Chen
1 sibling, 0 replies; 8+ messages in thread
From: Tiejun Chen @ 2012-09-17 9:54 UTC (permalink / raw)
To: benh; +Cc: linuxppc-dev
We don't do the real store operation for kprobing 'stwu Rx,(y)R1'
since this may corrupt the exception frame. Instead, we will do this
operation safely in the exception return code, after migrating the
current exception frame below the kprobed function's stack.
So we only update gpr[1] here and set a thread flag to mark
this.
Note we should also check whether this would trigger a kernel stack overflow.
Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
arch/powerpc/lib/sstep.c | 36 ++++++++++++++++++++++++++++++++++--
1 file changed, 34 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 9a52349..e15c521 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -566,7 +566,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
unsigned long int ea;
unsigned int cr, mb, me, sh;
int err;
- unsigned long old_ra;
+ unsigned long old_ra, val3;
long ival;
opcode = instr >> 26;
@@ -1486,11 +1486,43 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
goto ldst_done;
case 36: /* stw */
- case 37: /* stwu */
val = regs->gpr[rd];
err = write_mem(val, dform_ea(instr, regs), 4, regs);
goto ldst_done;
+ case 37: /* stwu */
+ val = regs->gpr[rd];
+ val3 = dform_ea(instr, regs);
+ /*
+ * For PPC32 we always use stwu to change stack point with r1. So
+ * this emulated store may corrupt the exception frame, now we
+ * have to provide the exception frame trampoline, which is pushed
+ * below the kprobed function stack. So we only update gpr[1] but
+ * don't emulate the real store operation. We will do real store
+ * operation safely in exception return code by checking this flag.
+ */
+ if ((ra == 1) && !(regs->msr & MSR_PR) \
+ && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
+ /*
+ * Check if we will touch kernel stack overflow
+ */
+ if (val3 - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
+ printk(KERN_CRIT "Can't kprobe this since Kernel stack overflow.\n");
+ err = -EINVAL;
+ break;
+ }
+
+ /*
+ * Check if we already set since that means we'll
+ * lose the previous value.
+ */
+ WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
+ set_thread_flag(TIF_EMULATE_STACK_STORE);
+ err = 0;
+ } else
+ err = write_mem(val, val3, 4, regs);
+ goto ldst_done;
+
case 38: /* stb */
case 39: /* stbu */
val = regs->gpr[rd];
--
1.7.9.5
^ permalink raw reply related [flat|nested] 8+ messages in thread
* RE: [v5][PATCH 2/3] powerpc/kprobe: complete kprobe and migrate exception frame
2012-09-17 9:54 ` [v5][PATCH 2/3] powerpc/kprobe: complete kprobe and migrate exception frame Tiejun Chen
@ 2012-09-17 10:02 ` David Laight
2012-09-17 10:19 ` tiejun.chen
2012-09-18 5:05 ` Benjamin Herrenschmidt
1 sibling, 1 reply; 8+ messages in thread
From: David Laight @ 2012-09-17 10:02 UTC (permalink / raw)
To: Tiejun Chen, benh; +Cc: linuxppc-dev
> /* N.B. the only way to get here is from the beq following ret_from_except. */
> resume_kernel:
> - /* check current_thread_info->preempt_count */
> + /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
> CURRENT_THREAD_INFO(r9, r1)
> + lwz r8,TI_FLAGS(r9)
> + andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
> + beq+ 1f
...
> +1:
Does this add a statically mispredicted branch to every
return to userspace ?
Or is there an earlier check for 'unlikely' conditions.
David
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [v5][PATCH 2/3] powerpc/kprobe: complete kprobe and migrate exception frame
2012-09-17 10:02 ` David Laight
@ 2012-09-17 10:19 ` tiejun.chen
0 siblings, 0 replies; 8+ messages in thread
From: tiejun.chen @ 2012-09-17 10:19 UTC (permalink / raw)
To: David Laight; +Cc: linuxppc-dev
On 09/17/2012 06:02 PM, David Laight wrote:
>> /* N.B. the only way to get here is from the beq following ret_from_except. */
>> resume_kernel:
>> - /* check current_thread_info->preempt_count */
>> + /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
>> CURRENT_THREAD_INFO(r9, r1)
>> + lwz r8,TI_FLAGS(r9)
>> + andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
>> + beq+ 1f
> ...
>> +1:
>
> Does this add a statically mispredicted branch to every
> return to userspace ?
Return to userspace? No, this just follows 'resume_kernel'.
Note I added this 'unlikely' hint here since I assume kprobes are usually
disabled by default, and it's also rare to kprobe 'stwu' in most kprobe use cases.
Tiejun
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [v5][PATCH 2/3] powerpc/kprobe: complete kprobe and migrate exception frame
2012-09-17 9:54 ` [v5][PATCH 2/3] powerpc/kprobe: complete kprobe and migrate exception frame Tiejun Chen
2012-09-17 10:02 ` David Laight
@ 2012-09-18 5:05 ` Benjamin Herrenschmidt
2012-09-18 5:09 ` Benjamin Herrenschmidt
1 sibling, 1 reply; 8+ messages in thread
From: Benjamin Herrenschmidt @ 2012-09-18 5:05 UTC (permalink / raw)
To: Tiejun Chen; +Cc: linuxppc-dev
On Mon, 2012-09-17 at 17:54 +0800, Tiejun Chen wrote:
> -#ifdef CONFIG_PREEMPT
> b restore
>
> /* N.B. the only way to get here is from the beq following ret_from_except. */
> resume_kernel:
> - /* check current_thread_info->preempt_count */
> + /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
> CURRENT_THREAD_INFO(r9, r1)
> + lwz r8,TI_FLAGS(r9)
> + andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
> + beq+ 1f
> +
> + addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
> +
> + lwz r3,GPR1(r1)
> + subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
> + mr r4,r1 /* src: current exception frame */
> + li r5,INT_FRAME_SIZE /* size: INT_FRAME_SIZE */
> + li r6,0 /* start offset: 0 */
> + mr r1,r3 /* Reroute the trampoline frame to r1 */
> +
> + /* Copy from the original to the trampoline. */
> + li r6,0
You just did that li r6,0 2 lines above :-) I'll fix it up manually
while applying.
> + srwi r5,r5,2
> + mtctr r5
> +2: lwzx r0,r6,r4
> + stwx r0,r6,r3
> + addi r6,r6,4
> + bdnz 2b
> +
> + /* Do real store operation to complete stwu */
> + lwz r5,GPR1(r1)
> + stw r8,0(r5)
> +
> + /* Clear _TIF_EMULATE_STACK_STORE flag */
> + lis r11,_TIF_EMULATE_STACK_STORE@h
> + addi r5,r9,TI_FLAGS
> +0: lwarx r8,0,r5
> + andc r8,r8,r11
> +#ifdef CONFIG_IBM405_ERR77
> + dcbt 0,r5
> +#endif
> + stwcx. r8,0,r5
> + bne- 0b
> +1:
> +
> +#ifdef CONFIG_PREEMPT
> + /* check current_thread_info->preempt_count */
> lwz r0,TI_PREEMPT(r9)
> cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
> bne restore
> - lwz r0,TI_FLAGS(r9)
> - andi. r0,r0,_TIF_NEED_RESCHED
> + andi. r8,r8,_TIF_NEED_RESCHED
> beq+ restore
> + lwz r3,_MSR(r1)
> andi. r0,r3,MSR_EE /* interrupts off? */
> beq restore /* don't schedule if so */
> #ifdef CONFIG_TRACE_IRQFLAGS
> @@ -864,8 +903,6 @@ resume_kernel:
> */
> bl trace_hardirqs_on
> #endif
> -#else
> -resume_kernel:
> #endif /* CONFIG_PREEMPT */
>
> /* interrupts are hard-disabled at this point */
> diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
> index b40e0b4..bdd2dc1 100644
> --- a/arch/powerpc/kernel/entry_64.S
> +++ b/arch/powerpc/kernel/entry_64.S
> @@ -593,6 +593,43 @@ _GLOBAL(ret_from_except_lite)
> b .ret_from_except
>
> resume_kernel:
> + /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
> + CURRENT_THREAD_INFO(r9, r1)
> + ld r8,TI_FLAGS(r9)
> + andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
> + beq+ 1f
> +
> + addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
> +
> + lwz r3,GPR1(r1)
> + subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
> + mr r4,r1 /* src: current exception frame */
> + li r5,INT_FRAME_SIZE /* size: INT_FRAME_SIZE */
> + li r6,0 /* start offset: 0 */
> + mr r1,r3 /* Reroute the trampoline frame to r1 */
> +
> + /* Copy from the original to the trampoline. */
> + li r6,0
> + srwi r5,r5,3
> + mtctr r5
> +2: ldx r0,r6,r4
> + stdx r0,r6,r3
> + addi r6,r6,8
> + bdnz 2b
> +
> + /* Do real store operation to complete stwu */
> + lwz r5,GPR1(r1)
> + std r8,0(r5)
> +
> + /* Clear _TIF_EMULATE_STACK_STORE flag */
> + lis r11,_TIF_EMULATE_STACK_STORE@h
> + addi r5,r9,TI_FLAGS
> + ldarx r4,0,r5
> + andc r4,r4,r11
> + stdcx. r4,0,r5
> + bne- 0b
> +1:
> +
> #ifdef CONFIG_PREEMPT
> /* Check if we need to preempt */
> andi. r0,r4,_TIF_NEED_RESCHED
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [v5][PATCH 2/3] powerpc/kprobe: complete kprobe and migrate exception frame
2012-09-18 5:05 ` Benjamin Herrenschmidt
@ 2012-09-18 5:09 ` Benjamin Herrenschmidt
2012-09-18 6:13 ` tiejun.chen
0 siblings, 1 reply; 8+ messages in thread
From: Benjamin Herrenschmidt @ 2012-09-18 5:09 UTC (permalink / raw)
To: Tiejun Chen; +Cc: linuxppc-dev
On Tue, 2012-09-18 at 15:05 +1000, Benjamin Herrenschmidt wrote:
> On Mon, 2012-09-17 at 17:54 +0800, Tiejun Chen wrote:
> > -#ifdef CONFIG_PREEMPT
> > b restore
> >
> > /* N.B. the only way to get here is from the beq following ret_from_except. */
> > resume_kernel:
> > - /* check current_thread_info->preempt_count */
> > + /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
> > CURRENT_THREAD_INFO(r9, r1)
> > + lwz r8,TI_FLAGS(r9)
> > + andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
> > + beq+ 1f
> > +
> > + addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
> > +
> > + lwz r3,GPR1(r1)
> > + subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
> > + mr r4,r1 /* src: current exception frame */
> > + li r5,INT_FRAME_SIZE /* size: INT_FRAME_SIZE */
> > + li r6,0 /* start offset: 0 */
> > + mr r1,r3 /* Reroute the trampoline frame to r1 */
> > +
> > + /* Copy from the original to the trampoline. */
> > + li r6,0
>
> You just did that li r6,0 2 lines above :-) I'll fix it up manually
> while applying.
In fact the srwi can be dropped completely, we can just load r5 with the
divided value. Committed, will push later today, please test.
Cheers,
Ben.
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [v5][PATCH 2/3] powerpc/kprobe: complete kprobe and migrate exception frame
2012-09-18 5:09 ` Benjamin Herrenschmidt
@ 2012-09-18 6:13 ` tiejun.chen
0 siblings, 0 replies; 8+ messages in thread
From: tiejun.chen @ 2012-09-18 6:13 UTC (permalink / raw)
To: Benjamin Herrenschmidt; +Cc: linuxppc-dev
On 09/18/2012 01:09 PM, Benjamin Herrenschmidt wrote:
> On Tue, 2012-09-18 at 15:05 +1000, Benjamin Herrenschmidt wrote:
>> On Mon, 2012-09-17 at 17:54 +0800, Tiejun Chen wrote:
>>> -#ifdef CONFIG_PREEMPT
>>> b restore
>>>
>>> /* N.B. the only way to get here is from the beq following ret_from_except. */
>>> resume_kernel:
>>> - /* check current_thread_info->preempt_count */
>>> + /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
>>> CURRENT_THREAD_INFO(r9, r1)
>>> + lwz r8,TI_FLAGS(r9)
>>> + andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
>>> + beq+ 1f
>>> +
>>> + addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
>>> +
>>> + lwz r3,GPR1(r1)
>>> + subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
>>> + mr r4,r1 /* src: current exception frame */
>>> + li r5,INT_FRAME_SIZE /* size: INT_FRAME_SIZE */
>>> + li r6,0 /* start offset: 0 */
>>> + mr r1,r3 /* Reroute the trampoline frame to r1 */
>>> +
>>> + /* Copy from the original to the trampoline. */
>>> + li r6,0
>>
>> You just did that li r6,0 2 lines above :-) I'll fix it up manually
>> while applying.
>
> In fact the srwi can be dropped completely, we can just load r5 with the
> divided value. Committed, will push later today, please test.
I retested kprobing do_fork() and show_interrupts(), with and without
CONFIG_PREEMPT enabled, separately, and it still appears to work.
For 32-bit:
------------
+ /* Copy from the original to the trampoline. */
+ lwz r3,GPR1(r1)
+ subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception
frame */
+ mr r4,r1 /* src: current exception frame */
+ li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
+ li r6,0 /* start offset: 0 */
+ mr r1,r3 /* Reroute the trampoline frame to r1 */
+ mtctr r5
+2: lwzx r0,r6,r4
+ stwx r0,r6,r3
+ addi r6,r6,4
+ bdnz 2b
And for 64-bit:
---------------
+ /* Copy from the original to the trampoline. */
+ lwz r3,GPR1(r1)
+ subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception
frame */
+ mr r4,r1 /* src: current exception frame */
+ li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
+ li r6,0 /* start offset: 0 */
+ mr r1,r3 /* Reroute the trampoline frame to r1 */
+ mtctr r5
+2: ldx r0,r6,r4
+ stdx r0,r6,r3
+ addi r6,r6,8
+ bdnz 2b
Thanks
Tiejun
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2012-09-18 6:13 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-09-17 9:54 [v5][PATCH 1/3] powerpc/kprobe: introduce a new thread flag Tiejun Chen
2012-09-17 9:54 ` [v5][PATCH 2/3] powerpc/kprobe: complete kprobe and migrate exception frame Tiejun Chen
2012-09-17 10:02 ` David Laight
2012-09-17 10:19 ` tiejun.chen
2012-09-18 5:05 ` Benjamin Herrenschmidt
2012-09-18 5:09 ` Benjamin Herrenschmidt
2012-09-18 6:13 ` tiejun.chen
2012-09-17 9:54 ` [v5][PATCH 3/3] powerpc/kprobe: don't emulate store when kprobe stwu r1 Tiejun Chen
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).