* [PATCH] powerpc/booke: Avoid link stack corruption in several places
@ 2021-08-23  7:53 Christophe Leroy
  2021-08-23 15:58 ` Segher Boessenkool
From: Christophe Leroy @ 2021-08-23  7:53 UTC (permalink / raw)
  To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman
  Cc: linux-kernel, linuxppc-dev

Use bcl 20,31,+4 instead of bl in order to preserve the link stack.

See commit c974809a26a1 ("powerpc/vdso: Avoid link stack corruption
in __get_datapage()") for details.
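
As an illustration, the pattern being changed is essentially the following
(a minimal sketch; the register and local label are placeholders, not code
taken from the files touched below):

	/* Old form: 'bl' pushes a return address onto the CPU's link
	 * stack (return-address predictor), but the matching 'blr' never
	 * happens, so later real function returns can mispredict.
	 */
	bl	1f
1:	mflr	r10			/* r10 = runtime address of 1: */

	/* New form: a 'bcl 20,31,' (branch always, and link) whose target
	 * is the very next instruction is special-cased by the branch
	 * predictor and does not push onto the link stack, while still
	 * setting LR.
	 */
	bcl	20,31,1f
1:	mflr	r10			/* r10 = runtime address of 1: */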

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
 arch/powerpc/include/asm/ppc_asm.h            | 2 +-
 arch/powerpc/kernel/exceptions-64e.S          | 6 +++---
 arch/powerpc/kernel/fsl_booke_entry_mapping.S | 8 ++++----
 arch/powerpc/kernel/head_44x.S                | 6 +++---
 arch/powerpc/kernel/head_fsl_booke.S          | 6 +++---
 arch/powerpc/mm/nohash/tlb_low.S              | 4 ++--
 6 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index ffe712307e11..2a675ba927ab 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -260,7 +260,7 @@ GLUE(.,name):
 
 /* Be careful, this will clobber the lr register. */
 #define LOAD_REG_ADDR_PIC(reg, name)		\
-	bl	0f;				\
+	bcl	20,31,0f			\
 0:	mflr	reg;				\
 	addis	reg,reg,(name - 0b)@ha;		\
 	addi	reg,reg,(name - 0b)@l;
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 1401787b0b93..0a1835a0ec12 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1127,7 +1127,7 @@ found_iprot:
  * r3 = MAS0_TLBSEL (for the iprot array)
  * r4 = SPRN_TLBnCFG
  */
-	bl	invstr				/* Find our address */
+	bcl	20,31,invstr			/* Find our address */
 invstr:	mflr	r6				/* Make it accessible */
 	mfmsr	r7
 	rlwinm	r5,r7,27,31,31			/* extract MSR[IS] */
@@ -1196,7 +1196,7 @@ skpinv:	addi	r6,r6,1				/* Increment */
 	mfmsr	r6
 	xori	r6,r6,MSR_IS
 	mtspr	SPRN_SRR1,r6
-	bl	1f		/* Find our address */
+	bcl	20,31,1f	/* Find our address */
 1:	mflr	r6
 	addi	r6,r6,(2f - 1b)
 	mtspr	SPRN_SRR0,r6
@@ -1256,7 +1256,7 @@ skpinv:	addi	r6,r6,1				/* Increment */
  * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
  */
 	/* Now we branch the new virtual address mapped by this entry */
-	bl	1f		/* Find our address */
+	bcl	20,31,1f	/* Find our address */
 1:	mflr	r6
 	addi	r6,r6,(2f - 1b)
 	tovirt(r6,r6)
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
index 8bccce6544b5..a9e2235f6c40 100644
--- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S
+++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 
 /* 1. Find the index of the entry we're executing in */
-	bl	invstr				/* Find our address */
+	bcl	20,31,invstr				/* Find our address */
 invstr:	mflr	r6				/* Make it accessible */
 	mfmsr	r7
 	rlwinm	r4,r7,27,31,31			/* extract MSR[IS] */
@@ -85,7 +85,7 @@ skpinv:	addi	r6,r6,1				/* Increment */
 	addi	r6,r6,10
 	slw	r6,r8,r6	/* convert to mask */
 
-	bl	1f		/* Find our address */
+	bcl	20,31,1f	/* Find our address */
 1:	mflr	r7
 
 	mfspr	r8,SPRN_MAS3
@@ -117,7 +117,7 @@ skpinv:	addi	r6,r6,1				/* Increment */
 
 	xori	r6,r4,1
 	slwi	r6,r6,5		/* setup new context with other address space */
-	bl	1f		/* Find our address */
+	bcl	20,31,1f	/* Find our address */
 1:	mflr	r9
 	rlwimi	r7,r9,0,20,31
 	addi	r7,r7,(2f - 1b)
@@ -207,7 +207,7 @@ next_tlb_setup:
 
 	lis	r7,MSR_KERNEL@h
 	ori	r7,r7,MSR_KERNEL@l
-	bl	1f			/* Find our address */
+	bcl	20,31,1f		/* Find our address */
 1:	mflr	r9
 	rlwimi	r6,r9,0,20,31
 	addi	r6,r6,(2f - 1b)
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index ddc978a2d381..b14efa87d1cf 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -70,7 +70,7 @@ _ENTRY(_start);
  * address.
  * r21 will be loaded with the physical runtime address of _stext
  */
-	bl	0f				/* Get our runtime address */
+	bcl	20,31,0f			/* Get our runtime address */
 0:	mflr	r21				/* Make it accessible */
 	addis	r21,r21,(_stext - 0b)@ha
 	addi	r21,r21,(_stext - 0b)@l 	/* Get our current runtime base */
@@ -853,7 +853,7 @@ _GLOBAL(init_cpu_state)
 wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
 	sync
 
-	bl	invstr				/* Find our address */
+	bcl	20,31,invstr			/* Find our address */
 invstr:	mflr	r5				/* Make it accessible */
 	tlbsx	r23,0,r5			/* Find entry we are in */
 	li	r4,0				/* Start at TLB entry 0 */
@@ -1045,7 +1045,7 @@ head_start_47x:
 	sync
 
 	/* Find the entry we are running from */
-	bl	1f
+	bcl	20,31,1f
 1:	mflr	r23
 	tlbsx	r23,0,r23
 	tlbre	r24,r23,0
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 0f9642f36b49..dd197da2ffcc 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -79,7 +79,7 @@ _ENTRY(_start);
 	mr	r23,r3
 	mr	r25,r4
 
-	bl	0f
+	bcl	20,31,0f
 0:	mflr	r8
 	addis	r3,r8,(is_second_reloc - 0b)@ha
 	lwz	r19,(is_second_reloc - 0b)@l(r3)
@@ -1132,7 +1132,7 @@ _GLOBAL(switch_to_as1)
 	bne	1b
 
 	/* Get the tlb entry used by the current running code */
-	bl	0f
+	bcl	20,31,0f
 0:	mflr	r4
 	tlbsx	0,r4
 
@@ -1166,7 +1166,7 @@ _GLOBAL(switch_to_as1)
 _GLOBAL(restore_to_as0)
 	mflr	r0
 
-	bl	0f
+	bcl	20,31,0f
 0:	mflr	r9
 	addi	r9,r9,1f - 0b
 
diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S
index 4613bf8e9aae..8b225a3df7e3 100644
--- a/arch/powerpc/mm/nohash/tlb_low.S
+++ b/arch/powerpc/mm/nohash/tlb_low.S
@@ -199,7 +199,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_476_DD2)
  * Touch enough instruction cache lines to ensure cache hits
  */
 1:	mflr	r9
-	bl	2f
+	bcl	20,31,2f
 2:	mflr	r6
 	li	r7,32
 	PPC_ICBT(0,R6,R7)		/* touch next cache line */
@@ -414,7 +414,7 @@ _GLOBAL(loadcam_multi)
 	 * Set up temporary TLB entry that is the same as what we're
 	 * running from, but in AS=1.
 	 */
-	bl	1f
+	bcl	20,31,1f
 1:	mflr	r6
 	tlbsx	0,r8
 	mfspr	r6,SPRN_MAS1
-- 
2.25.0



* Re: [PATCH] powerpc/booke: Avoid link stack corruption in several places
  2021-08-23  7:53 [PATCH] powerpc/booke: Avoid link stack corruption in several places Christophe Leroy
@ 2021-08-23 15:58 ` Segher Boessenkool
  2021-08-23 17:05   ` Christophe Leroy
From: Segher Boessenkool @ 2021-08-23 15:58 UTC (permalink / raw)
  To: Christophe Leroy
  Cc: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
	linuxppc-dev, linux-kernel

On Mon, Aug 23, 2021 at 07:53:01AM +0000, Christophe Leroy wrote:
>  /* Be careful, this will clobber the lr register. */
>  #define LOAD_REG_ADDR_PIC(reg, name)		\
> -	bl	0f;				\
> +	bcl	20,31,0f			\
>  0:	mflr	reg;				\
>  	addis	reg,reg,(name - 0b)@ha;		\
>  	addi	reg,reg,(name - 0b)@l;

The code ended each line with a semicolon before, for absolutely no
reason that I can see, but still.  Fixing that would be nice, but only
doing it on one line isn't good.

Btw.  Both the 7450 and the modern cores implementing this really need
this to be $+4, so it is a lot clearer to write that instead of 1f or
a named label.
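
In concrete terms that is just (a sketch; the register is arbitrary):

	bcl	20,31,$+4	/* link to the very next instruction */
	mflr	r6		/* r6 = address of this mflr */

and the bcl target then needs no label at all.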


Segher


* Re: [PATCH] powerpc/booke: Avoid link stack corruption in several places
  2021-08-23 15:58 ` Segher Boessenkool
@ 2021-08-23 17:05   ` Christophe Leroy
  2021-08-23 20:12     ` Segher Boessenkool
From: Christophe Leroy @ 2021-08-23 17:05 UTC (permalink / raw)
  To: Segher Boessenkool
  Cc: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
	linuxppc-dev, linux-kernel



On 23/08/2021 at 17:58, Segher Boessenkool wrote:
> On Mon, Aug 23, 2021 at 07:53:01AM +0000, Christophe Leroy wrote:
>>   /* Be careful, this will clobber the lr register. */
>>   #define LOAD_REG_ADDR_PIC(reg, name)		\
>> -	bl	0f;				\
>> +	bcl	20,31,0f			\
>>   0:	mflr	reg;				\
>>   	addis	reg,reg,(name - 0b)@ha;		\
>>   	addi	reg,reg,(name - 0b)@l;
> 
> The code ended each line with a semicolon before, for absolutely no
> reason that I can see, but still.  Fixing that would be nice, but only
> doing it on one line isn't good.

Sure, forgetting the semicolon broke the build. That's because the backslash removes the newline.
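
To make that concrete (a sketch of the preprocessed expansion; r3 and the
'foo' symbol are placeholders): the whole macro body reaches the assembler
as a single line, so the ';' is what keeps the statements apart:

	/* trailing ';' kept after the bcl: still a valid sequence */
	bcl 20,31,0f; 0: mflr r3; addis r3,r3,(foo - 0b)@ha; addi r3,r3,(foo - 0b)@l;

	/* ';' dropped: the bcl runs into the '0:' label and gas rejects it */
	bcl 20,31,0f 0: mflr r3; addis r3,r3,(foo - 0b)@ha; addi r3,r3,(foo - 0b)@l;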

The cleanest way I found to fix that kind of stuff is by using a GAS macro, as I did
for LOAD_REG_IMMEDIATE() some time ago.
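
Purely as a sketch of that idea (not code from this thread or from any later
revision; the macro layout here is only illustrative):

	.macro LOAD_REG_ADDR_PIC reg, name
	/* get our address without pushing onto the link stack */
	bcl	20,31,$+4
0:	mflr	\reg
	/* add the offset from here to 'name' */
	addis	\reg,\reg,(\name - 0b)@ha
	addi	\reg,\reg,(\name - 0b)@l
	.endm

With a real assembler macro there is no '\'-continuation and no trailing
semicolons at all; a thin cpp wrapper could keep the existing
LOAD_REG_ADDR_PIC(reg, name) call syntax, the way LOAD_REG_IMMEDIATE() does.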

> 
> Btw.  Both the 7450 and the modern cores implementing this really need
> this to be $+4, so it is a lot clearer to write that instead of 1f or
> a named label.

I like that, removing unneeded labels will make it smoother and clearer. I'll do it.

Christophe


* Re: [PATCH] powerpc/booke: Avoid link stack corruption in several places
  2021-08-23 17:05   ` Christophe Leroy
@ 2021-08-23 20:12     ` Segher Boessenkool
From: Segher Boessenkool @ 2021-08-23 20:12 UTC (permalink / raw)
  To: Christophe Leroy
  Cc: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
	linuxppc-dev, linux-kernel

On Mon, Aug 23, 2021 at 07:05:38PM +0200, Christophe Leroy wrote:
> On 23/08/2021 at 17:58, Segher Boessenkool wrote:
> >On Mon, Aug 23, 2021 at 07:53:01AM +0000, Christophe Leroy wrote:
> >>  /* Be careful, this will clobber the lr register. */
> >>  #define LOAD_REG_ADDR_PIC(reg, name)		\
> >>-	bl	0f;				\
> >>+	bcl	20,31,0f			\
> >>  0:	mflr	reg;				\
> >>  	addis	reg,reg,(name - 0b)@ha;		\
> >>  	addi	reg,reg,(name - 0b)@l;
> >
> >The code ended each line with a semicolon before, for absolutely no
> >reason that I can see, but still.  Fixing that would be nice, but only
> >doing it on one line isn't good.
> 
> Sure, forgetting the semicolon broke the build. That's because the 
> backslash removes the newline.

Ah right, one of the surprises you get from using the C preprocessor on
non-C code :-)

> The cleanest way I found to fix that kind of stuff is by using a GAS macro,
> as I did for LOAD_REG_IMMEDIATE() some time ago.

Yeah, good plan.  You can use loops and saner parameters etc. as well if
you do :-)

> >Btw.  Both the 7450 and the modern cores implementing this really need
> >this to be $+4, so it is a lot clearer to write that instead of 1f or
> >a named label.
> 
> I like that, removing unneeded labels will make it smoother and clearer. 
> I'll do it.

Cool, thanks!


Segher


