linuxppc-dev.lists.ozlabs.org archive mirror
* [PATCH v4] PPC: use CURRENT_THREAD_INFO instead of open coded assembly
@ 2012-07-05 14:41 Stuart Yoder
  2012-07-10 22:54 ` Alexander Graf
  2012-07-12 22:45 ` Paul Mackerras
  0 siblings, 2 replies; 4+ messages in thread
From: Stuart Yoder @ 2012-07-05 14:41 UTC (permalink / raw)
  To: agraf, linuxppc-dev, benh; +Cc: sfr, Stuart Yoder

From: Stuart Yoder <stuart.yoder@freescale.com>

Signed-off-by: Stuart Yoder <stuart.yoder@freescale.com>
---
-v4: fixed build issues in exception-64s.h and exceptions-64s.S

 arch/powerpc/include/asm/exception-64s.h |    4 ++--
 arch/powerpc/include/asm/thread_info.h   |    6 ++++++
 arch/powerpc/kernel/entry_32.S           |   24 ++++++++++++------------
 arch/powerpc/kernel/entry_64.S           |   14 +++++++-------
 arch/powerpc/kernel/exceptions-64e.S     |    2 +-
 arch/powerpc/kernel/exceptions-64s.S     |    2 +-
 arch/powerpc/kernel/head_fsl_booke.S     |    2 +-
 arch/powerpc/kernel/idle_6xx.S           |    4 ++--
 arch/powerpc/kernel/idle_book3e.S        |    2 +-
 arch/powerpc/kernel/idle_e500.S          |    4 ++--
 arch/powerpc/kernel/idle_power4.S        |    2 +-
 arch/powerpc/kernel/misc_32.S            |    4 ++--
 arch/powerpc/kvm/bookehv_interrupts.S    |    6 +-----
 arch/powerpc/mm/hash_low_32.S            |    8 ++++----
 arch/powerpc/sysdev/6xx-suspend.S        |    2 +-
 15 files changed, 44 insertions(+), 42 deletions(-)

diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index d58fc4e..a43c147 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -293,7 +293,7 @@ label##_hv:								\
 
 #define RUNLATCH_ON				\
 BEGIN_FTR_SECTION				\
-	clrrdi	r3,r1,THREAD_SHIFT;		\
+	CURRENT_THREAD_INFO(r3, r1);		\
 	ld	r4,TI_LOCAL_FLAGS(r3);		\
 	andi.	r0,r4,_TLF_RUNLATCH;		\
 	beql	ppc64_runlatch_on_trampoline;	\
@@ -332,7 +332,7 @@ label##_common:							\
 #ifdef CONFIG_PPC_970_NAP
 #define FINISH_NAP				\
 BEGIN_FTR_SECTION				\
-	clrrdi	r11,r1,THREAD_SHIFT;		\
+	CURRENT_THREAD_INFO(r11, r1);		\
 	ld	r9,TI_LOCAL_FLAGS(r11);		\
 	andi.	r10,r9,_TLF_NAPPING;		\
 	bnel	power4_fixup_nap;		\
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 68831e9..faf9352 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -22,6 +22,12 @@
 
 #define THREAD_SIZE		(1 << THREAD_SHIFT)
 
+#ifdef CONFIG_PPC64
+#define CURRENT_THREAD_INFO(dest, sp)	clrrdi dest, sp, THREAD_SHIFT
+#else
+#define CURRENT_THREAD_INFO(dest, sp)	rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#endif
+
 #ifndef __ASSEMBLY__
 #include <linux/cache.h>
 #include <asm/processor.h>
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index ba3aeb4..bad42e3 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -92,7 +92,7 @@ crit_transfer_to_handler:
 	mfspr	r8,SPRN_SPRG_THREAD
 	lwz	r0,KSP_LIMIT(r8)
 	stw	r0,SAVED_KSP_LIMIT(r11)
-	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
+	CURRENT_THREAD_INFO(r0, r1)
 	stw	r0,KSP_LIMIT(r8)
 	/* fall through */
 #endif
@@ -112,7 +112,7 @@ crit_transfer_to_handler:
 	mfspr	r8,SPRN_SPRG_THREAD
 	lwz	r0,KSP_LIMIT(r8)
 	stw	r0,saved_ksp_limit@l(0)
-	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
+	CURRENT_THREAD_INFO(r0, r1)
 	stw	r0,KSP_LIMIT(r8)
 	/* fall through */
 #endif
@@ -158,7 +158,7 @@ transfer_to_handler:
 	tophys(r11,r11)
 	addi	r11,r11,global_dbcr0@l
 #ifdef CONFIG_SMP
-	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
+	CURRENT_THREAD_INFO(r9, r1)
 	lwz	r9,TI_CPU(r9)
 	slwi	r9,r9,3
 	add	r11,r11,r9
@@ -179,7 +179,7 @@ transfer_to_handler:
 	ble-	stack_ovf		/* then the kernel stack overflowed */
 5:
 #if defined(CONFIG_6xx) || defined(CONFIG_E500)
-	rlwinm	r9,r1,0,0,31-THREAD_SHIFT
+	CURRENT_THREAD_INFO(r9, r1)
 	tophys(r9,r9)			/* check local flags */
 	lwz	r12,TI_LOCAL_FLAGS(r9)
 	mtcrf	0x01,r12
@@ -333,7 +333,7 @@ _GLOBAL(DoSyscall)
 	mtmsr	r11
 1:
 #endif /* CONFIG_TRACE_IRQFLAGS */
-	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
+	CURRENT_THREAD_INFO(r10, r1)
 	lwz	r11,TI_FLAGS(r10)
 	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
 	bne-	syscall_dotrace
@@ -354,7 +354,7 @@ ret_from_syscall:
 	bl	do_show_syscall_exit
 #endif
 	mr	r6,r3
-	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
+	CURRENT_THREAD_INFO(r12, r1)
 	/* disable interrupts so current_thread_info()->flags can't change */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
 	/* Note: We don't bother telling lockdep about it */
@@ -815,7 +815,7 @@ ret_from_except:
 
 user_exc_return:		/* r10 contains MSR_KERNEL here */
 	/* Check current_thread_info()->flags */
-	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
+	CURRENT_THREAD_INFO(r9, r1)
 	lwz	r9,TI_FLAGS(r9)
 	andi.	r0,r9,_TIF_USER_WORK_MASK
 	bne	do_work
@@ -835,7 +835,7 @@ restore_user:
 /* N.B. the only way to get here is from the beq following ret_from_except. */
 resume_kernel:
 	/* check current_thread_info->preempt_count */
-	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
+	CURRENT_THREAD_INFO(r9, r1)
 	lwz	r0,TI_PREEMPT(r9)
 	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
 	bne	restore
@@ -852,7 +852,7 @@ resume_kernel:
 	bl	trace_hardirqs_off
 #endif
 1:	bl	preempt_schedule_irq
-	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
+	CURRENT_THREAD_INFO(r9, r1)
 	lwz	r3,TI_FLAGS(r9)
 	andi.	r0,r3,_TIF_NEED_RESCHED
 	bne-	1b
@@ -1122,7 +1122,7 @@ ret_from_debug_exc:
 	lwz	r10,SAVED_KSP_LIMIT(r1)
 	stw	r10,KSP_LIMIT(r9)
 	lwz	r9,THREAD_INFO-THREAD(r9)
-	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)
+	CURRENT_THREAD_INFO(r10, r1)
 	lwz	r10,TI_PREEMPT(r10)
 	stw	r10,TI_PREEMPT(r9)
 	RESTORE_xSRR(SRR0,SRR1);
@@ -1156,7 +1156,7 @@ load_dbcr0:
 	lis	r11,global_dbcr0@ha
 	addi	r11,r11,global_dbcr0@l
 #ifdef CONFIG_SMP
-	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
+	CURRENT_THREAD_INFO(r9, r1)
 	lwz	r9,TI_CPU(r9)
 	slwi	r9,r9,3
 	add	r11,r11,r9
@@ -1197,7 +1197,7 @@ recheck:
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 	SYNC
 	MTMSRD(r10)		/* disable interrupts */
-	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
+	CURRENT_THREAD_INFO(r9, r1)
 	lwz	r9,TI_FLAGS(r9)
 	andi.	r0,r9,_TIF_NEED_RESCHED
 	bne-	do_resched
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index ed1718f..ba943b9 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -146,7 +146,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 	REST_2GPRS(7,r1)
 	addi	r9,r1,STACK_FRAME_OVERHEAD
 #endif
-	clrrdi	r11,r1,THREAD_SHIFT
+	CURRENT_THREAD_INFO(r11, r1)
 	ld	r10,TI_FLAGS(r11)
 	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
 	bne-	syscall_dotrace
@@ -181,7 +181,7 @@ syscall_exit:
 	bl	.do_show_syscall_exit
 	ld	r3,RESULT(r1)
 #endif
-	clrrdi	r12,r1,THREAD_SHIFT
+	CURRENT_THREAD_INFO(r12, r1)
 
 	ld	r8,_MSR(r1)
 #ifdef CONFIG_PPC_BOOK3S
@@ -262,7 +262,7 @@ syscall_dotrace:
 	ld	r7,GPR7(r1)
 	ld	r8,GPR8(r1)
 	addi	r9,r1,STACK_FRAME_OVERHEAD
-	clrrdi	r10,r1,THREAD_SHIFT
+	CURRENT_THREAD_INFO(r10, r1)
 	ld	r10,TI_FLAGS(r10)
 	b	.Lsyscall_dotrace_cont
 
@@ -499,7 +499,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 2:
 #endif /* !CONFIG_PPC_BOOK3S */
 
-	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
+	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
 	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
 	   because we don't need to leave the 288-byte ABI gap at the
 	   top of the kernel stack. */
@@ -559,7 +559,7 @@ _GLOBAL(ret_from_except_lite)
 #endif /* CONFIG_PPC_BOOK3E */
 
 #ifdef CONFIG_PREEMPT
-	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
+	CURRENT_THREAD_INFO(r9, r1)
 	li	r0,_TIF_NEED_RESCHED	/* bits to check */
 	ld	r3,_MSR(r1)
 	ld	r4,TI_FLAGS(r9)
@@ -574,7 +574,7 @@ _GLOBAL(ret_from_except_lite)
 	beq	restore		/* if not, just restore regs and return */
 
 	/* Check current_thread_info()->flags */
-	clrrdi	r9,r1,THREAD_SHIFT
+	CURRENT_THREAD_INFO(r9, r1)
 	ld	r4,TI_FLAGS(r9)
 	andi.	r0,r4,_TIF_USER_WORK_MASK
 	bne	do_work
@@ -782,7 +782,7 @@ do_work:
 1:	bl	.preempt_schedule_irq
 
 	/* Re-test flags and eventually loop */
-	clrrdi	r9,r1,THREAD_SHIFT
+	CURRENT_THREAD_INFO(r9, r1)
 	ld	r4,TI_FLAGS(r9)
 	andi.	r0,r4,_TIF_NEED_RESCHED
 	bne	1b
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 7215cc2..2f86db6 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -222,7 +222,7 @@ exc_##n##_bad_stack:							    \
  * interrupts happen before the wait instruction.
  */
 #define CHECK_NAPPING()							\
-	clrrdi	r11,r1,THREAD_SHIFT;					\
+	CURRENT_THREAD_INFO(r11, r1);					\
 	ld	r10,TI_LOCAL_FLAGS(r11);				\
 	andi.	r9,r10,_TLF_NAPPING;					\
 	beq+	1f;							\
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 1c06d29..8ad3468 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -851,7 +851,7 @@ BEGIN_FTR_SECTION
 	bne-	do_ste_alloc		/* If so handle it */
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
 
-	clrrdi	r11,r1,THREAD_SHIFT
+	CURRENT_THREAD_INFO(r11, r1)
 	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
 	bne	77f			/* then don't call hash_page now */
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 1f4434a..7e7bd88 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -192,7 +192,7 @@ _ENTRY(__early_start)
 	li	r0,0
 	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
 
-	rlwinm  r22,r1,0,0,31-THREAD_SHIFT      /* current thread_info */
+	CURRENT_THREAD_INFO(r22, r1)
 	stw	r24, TI_CPU(r22)
 
 	bl	early_init
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 15c611d..1686916 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -135,7 +135,7 @@ BEGIN_FTR_SECTION
 	DSSALL
 	sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	rlwinm	r9,r1,0,0,31-THREAD_SHIFT	/* current thread_info */
+	CURRENT_THREAD_INFO(r9, r1)
 	lwz	r8,TI_LOCAL_FLAGS(r9)	/* set napping bit */
 	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
 	stw	r8,TI_LOCAL_FLAGS(r9)	/* it will return to our caller */
@@ -158,7 +158,7 @@ _GLOBAL(power_save_ppc32_restore)
 	stw	r9,_NIP(r11)		/* make it do a blr */
 
 #ifdef CONFIG_SMP
-	rlwinm	r12,r11,0,0,31-THREAD_SHIFT
+	CURRENT_THREAD_INFO(r12, r11)
 	lwz	r11,TI_CPU(r12)		/* get cpu number * 4 */
 	slwi	r11,r11,2
 #else
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S
index ff007b5..4c7cb400 100644
--- a/arch/powerpc/kernel/idle_book3e.S
+++ b/arch/powerpc/kernel/idle_book3e.S
@@ -60,7 +60,7 @@ _GLOBAL(book3e_idle)
 1:	/* Let's set the _TLF_NAPPING flag so interrupts make us return
 	 * to the right spot
 	*/
-	clrrdi	r11,r1,THREAD_SHIFT
+	CURRENT_THREAD_INFO(r11, r1)
 	ld	r10,TI_LOCAL_FLAGS(r11)
 	ori	r10,r10,_TLF_NAPPING
 	std	r10,TI_LOCAL_FLAGS(r11)
diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S
index 4f0ab85..1544866 100644
--- a/arch/powerpc/kernel/idle_e500.S
+++ b/arch/powerpc/kernel/idle_e500.S
@@ -21,7 +21,7 @@
 	.text
 
 _GLOBAL(e500_idle)
-	rlwinm	r3,r1,0,0,31-THREAD_SHIFT	/* current thread_info */
+	CURRENT_THREAD_INFO(r3, r1)
 	lwz	r4,TI_LOCAL_FLAGS(r3)	/* set napping bit */
 	ori	r4,r4,_TLF_NAPPING	/* so when we take an exception */
 	stw	r4,TI_LOCAL_FLAGS(r3)	/* it will return to our caller */
@@ -96,7 +96,7 @@ _GLOBAL(power_save_ppc32_restore)
 	stw	r9,_NIP(r11)		/* make it do a blr */
 
 #ifdef CONFIG_SMP
-	rlwinm	r12,r1,0,0,31-THREAD_SHIFT
+	CURRENT_THREAD_INFO(r12, r1)
 	lwz	r11,TI_CPU(r12)		/* get cpu number * 4 */
 	slwi	r11,r11,2
 #else
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 2c71b0f..e3edaa1 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -59,7 +59,7 @@ BEGIN_FTR_SECTION
 	DSSALL
 	sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	clrrdi	r9,r1,THREAD_SHIFT	/* current thread_info */
+	CURRENT_THREAD_INFO(r9, r1)
 	ld	r8,TI_LOCAL_FLAGS(r9)	/* set napping bit */
 	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
 	std	r8,TI_LOCAL_FLAGS(r9)	/* it will return to our caller */
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 386d57f..407e293 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -179,7 +179,7 @@ _GLOBAL(low_choose_750fx_pll)
 	mtspr	SPRN_HID1,r4
 
 	/* Store new HID1 image */
-	rlwinm	r6,r1,0,0,(31-THREAD_SHIFT)
+	CURRENT_THREAD_INFO(r6, r1)
 	lwz	r6,TI_CPU(r6)
 	slwi	r6,r6,2
 	addis	r6,r6,nap_save_hid1@ha
@@ -699,7 +699,7 @@ _GLOBAL(kernel_thread)
 #ifdef CONFIG_SMP
 _GLOBAL(start_secondary_resume)
 	/* Reset stack */
-	rlwinm	r1,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
+	CURRENT_THREAD_INFO(r1, r1)
 	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 	li	r3,0
 	stw	r3,0(r1)		/* Zero the stack frame pointer	*/
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 0fa2ef7..c8c7a04 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -161,11 +161,7 @@
 	mtspr	SPRN_EPLC, r8
 
 	/* disable preemption, so we are sure we hit the fixup handler */
-#ifdef CONFIG_PPC64
-	clrrdi	r8,r1,THREAD_SHIFT
-#else
-	rlwinm	r8,r1,0,0,31-THREAD_SHIFT       /* current thread_info */
-#endif
+	CURRENT_THREAD_INFO(r8, r1)
 	li	r7, 1
 	stw	r7, TI_PREEMPT(r8)
 
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index b13d589..115347f 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -184,7 +184,7 @@ _GLOBAL(add_hash_page)
 	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
 
 #ifdef CONFIG_SMP
-	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT) /* use cpu number to make tag */
+	CURRENT_THREAD_INFO(r8, r1)	/* use cpu number to make tag */
 	lwz	r8,TI_CPU(r8)		/* to go in mmu_hash_lock */
 	oris	r8,r8,12
 #endif /* CONFIG_SMP */
@@ -545,7 +545,7 @@ _GLOBAL(flush_hash_pages)
 #ifdef CONFIG_SMP
 	addis	r9,r7,mmu_hash_lock@ha
 	addi	r9,r9,mmu_hash_lock@l
-	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
+	CURRENT_THREAD_INFO(r8, r1)
 	add	r8,r8,r7
 	lwz	r8,TI_CPU(r8)
 	oris	r8,r8,9
@@ -639,7 +639,7 @@ _GLOBAL(flush_hash_patch_B)
  */
 _GLOBAL(_tlbie)
 #ifdef CONFIG_SMP
-	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
+	CURRENT_THREAD_INFO(r8, r1)
 	lwz	r8,TI_CPU(r8)
 	oris	r8,r8,11
 	mfmsr	r10
@@ -677,7 +677,7 @@ _GLOBAL(_tlbie)
  */
 _GLOBAL(_tlbia)
 #if defined(CONFIG_SMP)
-	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
+	CURRENT_THREAD_INFO(r8, r1)
 	lwz	r8,TI_CPU(r8)
 	oris	r8,r8,10
 	mfmsr	r10
diff --git a/arch/powerpc/sysdev/6xx-suspend.S b/arch/powerpc/sysdev/6xx-suspend.S
index 21cda08..cf48e9c 100644
--- a/arch/powerpc/sysdev/6xx-suspend.S
+++ b/arch/powerpc/sysdev/6xx-suspend.S
@@ -29,7 +29,7 @@ _GLOBAL(mpc6xx_enter_standby)
 	ori	r5, r5, ret_from_standby@l
 	mtlr	r5
 
-	rlwinm	r5, r1, 0, 0, 31-THREAD_SHIFT
+	CURRENT_THREAD_INFO(r5, r1)
 	lwz	r6, TI_LOCAL_FLAGS(r5)
 	ori	r6, r6, _TLF_SLEEPING
 	stw	r6, TI_LOCAL_FLAGS(r5)
-- 
1.7.3.4
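
The new CURRENT_THREAD_INFO macro in asm/thread_info.h wraps the mask
operation that both the 32-bit and 64-bit paths were open coding: the
thread_info structure sits at the base of the THREAD_SIZE-aligned kernel
stack, so rounding the stack pointer down to a THREAD_SIZE boundary
yields its address. A minimal C sketch of the idea (the THREAD_SHIFT
value and the function name below are illustrative only, not the
kernel's actual implementation):

	#include <stdint.h>

	#define THREAD_SHIFT	13	/* example; the real value is per-config */
	#define THREAD_SIZE	((uintptr_t)1 << THREAD_SHIFT)

	struct thread_info;		/* per-thread data at the stack base */

	/* Round the stack pointer down to the start of the current stack. */
	static inline struct thread_info *thread_info_from_sp(uintptr_t sp)
	{
		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	}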


* Re: [PATCH v4] PPC: use CURRENT_THREAD_INFO instead of open coded assembly
  2012-07-05 14:41 [PATCH v4] PPC: use CURRENT_THREAD_INFO instead of open coded assembly Stuart Yoder
@ 2012-07-10 22:54 ` Alexander Graf
  2012-07-12 22:45 ` Paul Mackerras
  1 sibling, 0 replies; 4+ messages in thread
From: Alexander Graf @ 2012-07-10 22:54 UTC (permalink / raw)
  To: Stuart Yoder; +Cc: linuxppc-dev, sfr


On 05.07.2012, at 16:41, Stuart Yoder wrote:

> From: Stuart Yoder <stuart.yoder@freescale.com>
>
> Signed-off-by: Stuart Yoder <stuart.yoder@freescale.com>

Ben, ping?


Alex



* Re: [PATCH v4] PPC: use CURRENT_THREAD_INFO instead of open coded assembly
  2012-07-05 14:41 [PATCH v4] PPC: use CURRENT_THREAD_INFO instead of open coded assembly Stuart Yoder
  2012-07-10 22:54 ` Alexander Graf
@ 2012-07-12 22:45 ` Paul Mackerras
  2012-07-13  3:33   ` Benjamin Herrenschmidt
  1 sibling, 1 reply; 4+ messages in thread
From: Paul Mackerras @ 2012-07-12 22:45 UTC (permalink / raw)
  To: Stuart Yoder; +Cc: linuxppc-dev, agraf, sfr

On Thu, Jul 05, 2012 at 09:41:35AM -0500, Stuart Yoder wrote:

> diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
> index ba3aeb4..bad42e3 100644
> --- a/arch/powerpc/kernel/entry_32.S
> +++ b/arch/powerpc/kernel/entry_32.S
> @@ -92,7 +92,7 @@ crit_transfer_to_handler:
>  	mfspr	r8,SPRN_SPRG_THREAD
>  	lwz	r0,KSP_LIMIT(r8)
>  	stw	r0,SAVED_KSP_LIMIT(r11)
> -	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
> +	CURRENT_THREAD_INFO(r0, r1)
>  	stw	r0,KSP_LIMIT(r8)
>  	/* fall through */
>  #endif
> @@ -112,7 +112,7 @@ crit_transfer_to_handler:
>  	mfspr	r8,SPRN_SPRG_THREAD
>  	lwz	r0,KSP_LIMIT(r8)
>  	stw	r0,saved_ksp_limit@l(0)
> -	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
> +	CURRENT_THREAD_INFO(r0, r1)
>  	stw	r0,KSP_LIMIT(r8)
>  	/* fall through */
>  #endif

Do you really mean to replace a rlwimi with a rlwinm?  If so, is that
because the rlwinm is a bug fix, or is it because you know something
special about KSP_LIMIT(r8) which means that rlwinm and rlwimi are
equivalent here?

Paul.


* Re: [PATCH v4] PPC: use CURRENT_THREAD_INFO instead of open coded assembly
  2012-07-12 22:45 ` Paul Mackerras
@ 2012-07-13  3:33   ` Benjamin Herrenschmidt
  0 siblings, 0 replies; 4+ messages in thread
From: Benjamin Herrenschmidt @ 2012-07-13  3:33 UTC (permalink / raw)
  To: Paul Mackerras; +Cc: sfr, linuxppc-dev, Stuart Yoder, agraf

On Fri, 2012-07-13 at 08:45 +1000, Paul Mackerras wrote:
> On Thu, Jul 05, 2012 at 09:41:35AM -0500, Stuart Yoder wrote:
> 
> > diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
> > index ba3aeb4..bad42e3 100644
> > --- a/arch/powerpc/kernel/entry_32.S
> > +++ b/arch/powerpc/kernel/entry_32.S
> > @@ -92,7 +92,7 @@ crit_transfer_to_handler:
> >  	mfspr	r8,SPRN_SPRG_THREAD
> >  	lwz	r0,KSP_LIMIT(r8)
> >  	stw	r0,SAVED_KSP_LIMIT(r11)
> > -	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
> > +	CURRENT_THREAD_INFO(r0, r1)
> >  	stw	r0,KSP_LIMIT(r8)
> >  	/* fall through */
> >  #endif
> > @@ -112,7 +112,7 @@ crit_transfer_to_handler:
> >  	mfspr	r8,SPRN_SPRG_THREAD
> >  	lwz	r0,KSP_LIMIT(r8)
> >  	stw	r0,saved_ksp_limit@l(0)
> > -	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
> > +	CURRENT_THREAD_INFO(r0, r1)
> >  	stw	r0,KSP_LIMIT(r8)
> >  	/* fall through */
> >  #endif
> 
> Do you really mean to replace a rlwimi with a rlwinm?  If so, is that
> because the rlwinm is a bug fix, or is it because you know something
> special about KSP_LIMIT(r8) which means that rlwinm and rlwimi are
> equivalent here?

Ah, that's an interesting one I hadn't spotted when reviewing. Both
variants (rlwimi and rlwinm) will effectively replace the top bits of
KSP_LIMIT, switching it to the current stack.

The difference is that the original one (rlwimi) will preserve the
bottom bits.
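
Roughly, in C terms (a sketch only; the THREAD_SHIFT value below is
illustrative, not the real per-config one), the two forms compute:

	#define THREAD_SHIFT	13	/* illustrative value only */
	#define THREAD_SIZE	(1u << THREAD_SHIFT)
	#define HIGH_MASK	(~(THREAD_SIZE - 1u))

	/* rlwinm r0,r1,0,0,31-THREAD_SHIFT: r0 is overwritten entirely */
	unsigned int rlwinm_form(unsigned int r1)
	{
		return r1 & HIGH_MASK;	/* old low bits of r0 are lost */
	}

	/* rlwimi r0,r1,0,0,31-THREAD_SHIFT: only the masked bits of r0 change */
	unsigned int rlwimi_form(unsigned int r0, unsigned int r1)
	{
		return (r1 & HIGH_MASK) | (r0 & ~HIGH_MASK);
	}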

Now, do we want those bottom bits? It looks like we do if we want to
mark the thread_info at the bottom of the stack as "out of bounds".

So the patch breaks that. It will not generally break a working kernel,
but the stack overflow detection may not trigger if the overflow is just
enough to overwrite the thread_info.

Stuart, I already applied v4 of the patch to powerpc-next and I'd rather
not rebase it, so can you please send a fixup patch that effectively
reverts those two hunks? We can leave that open coded. While at it,
please add a comment explaining what the code does, to avoid similar
confusion in the future.

Cheers,
Ben.

