linuxppc-dev.lists.ozlabs.org archive mirror
* [RFC PATCH 0/9] powerpc/64: Build with PC-Relative addressing
@ 2022-12-27  9:26 Nicholas Piggin
  2022-12-27  9:26 ` [RFC PATCH 1/9] crypto: powerpc - Use address generation helper for asm Nicholas Piggin
                   ` (8 more replies)
  0 siblings, 9 replies; 10+ messages in thread
From: Nicholas Piggin @ 2022-12-27  9:26 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Piggin, Alan Modra

This is a more complete change than my earlier hack. Namely, it fixes
the boot code so it's more unified rather than adding a special case
for Book3S+PCREL. There are lots of bug fixes, it adds some of the
ftrace and BPF trampolines/stubs, and it adds module support, which
might be the most interesting bit.

This won't see a lot of real use until POWER10 is the oldest supported
CPU for distros, but since we're quite a unique user of the toolchain
I'd like to start ironing things out earlier rather than later. I'm
keeping a list of observations here,
https://github.com/linuxppc/issues/issues/455, and will take them to
toolchain developers after the kernel work is a bit further along.

Thanks,
Nick

Nicholas Piggin (9):
  crypto: powerpc - Use address generation helper for asm
  powerpc/64s: Refactor initialisation after prom
  powerpc/64e: Simplify address calculation in secondary hold loop
  powerpc/64: Move initial base and TOC pointer calculation
  powerpc/64s: Run at the kernel virtual address earlier in boot
  powerpc: add CFUNC assembly label annotation
  powerpc/64: Add support to build with prefixed instructions
  powerpc/64: vmlinux support building with PCREL addressing
  powerpc/64: modules support building with PCREL addressing

 arch/powerpc/Kconfig                    |   6 +
 arch/powerpc/Makefile                   |  10 +
 arch/powerpc/crypto/crc32-vpmsum_core.S |  13 +-
 arch/powerpc/include/asm/atomic.h       |  24 +-
 arch/powerpc/include/asm/io.h           |  37 +++
 arch/powerpc/include/asm/module.h       |   9 +-
 arch/powerpc/include/asm/paca.h         |   2 +
 arch/powerpc/include/asm/ppc-opcode.h   |   8 +
 arch/powerpc/include/asm/ppc_asm.h      |  24 ++
 arch/powerpc/include/asm/sections.h     |   5 +
 arch/powerpc/include/asm/uaccess.h      |  28 +-
 arch/powerpc/include/uapi/asm/elf.h     |   4 +
 arch/powerpc/kernel/asm-offsets.c       |   2 +
 arch/powerpc/kernel/exceptions-64s.S    | 112 ++++----
 arch/powerpc/kernel/head_64.S           | 179 +++++++-----
 arch/powerpc/kernel/interrupt_64.S      |  28 +-
 arch/powerpc/kernel/irq.c               |   8 +
 arch/powerpc/kernel/misc_64.S           |   2 +-
 arch/powerpc/kernel/module_64.c         | 344 ++++++++++++++++++++----
 arch/powerpc/kernel/paca.c              |   2 +
 arch/powerpc/kernel/trace/ftrace.c      |  52 +++-
 arch/powerpc/kernel/vdso/gettimeofday.S |   6 +-
 arch/powerpc/kernel/vector.S            |   6 +
 arch/powerpc/kernel/vmlinux.lds.S       |   6 +
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |  16 +-
 arch/powerpc/lib/copypage_64.S          |   4 +-
 arch/powerpc/lib/copypage_power7.S      |   4 +-
 arch/powerpc/lib/copyuser_power7.S      |   8 +-
 arch/powerpc/lib/hweight_64.S           |   8 +-
 arch/powerpc/lib/memcmp_64.S            |   4 +-
 arch/powerpc/lib/memcpy_power7.S        |   6 +-
 arch/powerpc/net/bpf_jit.h              |  10 +-
 arch/powerpc/net/bpf_jit_comp64.c       |  35 ++-
 arch/powerpc/platforms/Kconfig.cputype  |  38 +++
 arch/powerpc/platforms/pseries/hvCall.S |   4 +-
 arch/powerpc/xmon/xmon.c                |   2 +
 36 files changed, 793 insertions(+), 263 deletions(-)

-- 
2.37.2



* [RFC PATCH 1/9] crypto: powerpc - Use address generation helper for asm
  2022-12-27  9:26 [RFC PATCH 0/9] powerpc/64: Build with PC-Relative addressing Nicholas Piggin
@ 2022-12-27  9:26 ` Nicholas Piggin
  2022-12-27  9:26 ` [RFC PATCH 2/9] powerpc/64s: Refactor initialisation after prom Nicholas Piggin
                   ` (7 subsequent siblings)
  8 siblings, 0 replies; 10+ messages in thread
From: Nicholas Piggin @ 2022-12-27  9:26 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Piggin, Alan Modra
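
The crc32-vpmsum asm open-codes TOC-relative address generation as
addis/addi @toc@ha/@toc@l pairs. Replace these with the
LOAD_REG_ADDR() helper, so the addressing form is chosen in one place
and can follow the kernel's addressing mode when PCREL support is
added later in the series.

As a hedged sketch of what the helper resolves to (the authoritative
definitions live in arch/powerpc/include/asm/ppc_asm.h, and the
CONFIG_PPC_KERNEL_PCREL name is an assumption based on the later
patches):

	/* sketch only: 64-bit LOAD_REG_ADDR, GOT load vs prefixed pcrel */
	#ifdef CONFIG_PPC_KERNEL_PCREL
	#define LOAD_REG_ADDR(reg, name)	\
		pla	reg,name@pcrel
	#else
	#define LOAD_REG_ADDR(reg, name)	\
		ld	reg,name@got(r2)
	#endif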

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/crypto/crc32-vpmsum_core.S | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/crypto/crc32-vpmsum_core.S b/arch/powerpc/crypto/crc32-vpmsum_core.S
index a16a717c809c..b0f87f595b26 100644
--- a/arch/powerpc/crypto/crc32-vpmsum_core.S
+++ b/arch/powerpc/crypto/crc32-vpmsum_core.S
@@ -113,9 +113,7 @@ FUNC_START(CRC_FUNCTION_NAME)
 #endif
 
 #ifdef BYTESWAP_DATA
-	addis	r3,r2,.byteswap_constant@toc@ha
-	addi	r3,r3,.byteswap_constant@toc@l
-
+	LOAD_REG_ADDR(r3, .byteswap_constant)
 	lvx	byteswap,0,r3
 	addi	r3,r3,16
 #endif
@@ -150,8 +148,7 @@ FUNC_START(CRC_FUNCTION_NAME)
 	addi	r7,r7,-1
 	mtctr	r7
 
-	addis	r3,r2,.constants@toc@ha
-	addi	r3,r3,.constants@toc@l
+	LOAD_REG_ADDR(r3, .constants)
 
 	/* Find the start of our constants */
 	add	r3,r3,r8
@@ -506,8 +503,7 @@ FUNC_START(CRC_FUNCTION_NAME)
 
 .Lbarrett_reduction:
 	/* Barrett constants */
-	addis	r3,r2,.barrett_constants@toc@ha
-	addi	r3,r3,.barrett_constants@toc@l
+	LOAD_REG_ADDR(r3, .barrett_constants)
 
 	lvx	const1,0,r3
 	lvx	const2,off16,r3
@@ -610,8 +606,7 @@ FUNC_START(CRC_FUNCTION_NAME)
 	cmpdi	r5,0
 	beq	.Lzero
 
-	addis	r3,r2,.short_constants@toc@ha
-	addi	r3,r3,.short_constants@toc@l
+	LOAD_REG_ADDR(r3, .short_constants)
 
 	/* Calculate where in the constant table we need to start */
 	subfic	r6,r5,256
-- 
2.37.2



* [RFC PATCH 2/9] powerpc/64s: Refactor initialisation after prom
  2022-12-27  9:26 [RFC PATCH 0/9] powerpc/64: Build with PC-Relative addressing Nicholas Piggin
  2022-12-27  9:26 ` [RFC PATCH 1/9] crypto: powerpc - Use address generation helper for asm Nicholas Piggin
@ 2022-12-27  9:26 ` Nicholas Piggin
  2022-12-27  9:26 ` [RFC PATCH 3/9] powerpc/64e: Simplify address calculation in secondary hold loop Nicholas Piggin
                   ` (6 subsequent siblings)
  8 siblings, 0 replies; 10+ messages in thread
From: Nicholas Piggin @ 2022-12-27  9:26 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Piggin, Alan Modra

Move some basic Book3S initialisation after prom into a function,
similar to what Book3E has. Book3E returns from this function at the
virtual address mapping, and Book3S will do the same in a later change,
so making them look similar helps with that.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kernel/head_64.S | 44 ++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 19 deletions(-)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 7558ba4eb864..5af2e473b195 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -475,8 +475,30 @@ SYM_FUNC_START_LOCAL(__mmu_off)
 	rfid
 	b	.	/* prevent speculative execution */
 SYM_FUNC_END(__mmu_off)
-#endif
 
+start_initialization_book3s:
+	mflr	r25
+
+	/* Setup some critical 970 SPRs before switching MMU off */
+	mfspr	r0,SPRN_PVR
+	srwi	r0,r0,16
+	cmpwi	r0,0x39		/* 970 */
+	beq	1f
+	cmpwi	r0,0x3c		/* 970FX */
+	beq	1f
+	cmpwi	r0,0x44		/* 970MP */
+	beq	1f
+	cmpwi	r0,0x45		/* 970GX */
+	bne	2f
+1:	bl	__cpu_preinit_ppc970
+2:
+
+	/* Switch off MMU if not already off */
+	bl	__mmu_off
+
+	mtlr	r25
+	blr
+#endif
 
 /*
  * Here is our main kernel entry point. We support currently 2 kind of entries
@@ -523,26 +545,10 @@ __start_initialization_multiplatform:
 
 #ifdef CONFIG_PPC_BOOK3E_64
 	bl	start_initialization_book3e
-	b	__after_prom_start
 #else
-	/* Setup some critical 970 SPRs before switching MMU off */
-	mfspr	r0,SPRN_PVR
-	srwi	r0,r0,16
-	cmpwi	r0,0x39		/* 970 */
-	beq	1f
-	cmpwi	r0,0x3c		/* 970FX */
-	beq	1f
-	cmpwi	r0,0x44		/* 970MP */
-	beq	1f
-	cmpwi	r0,0x45		/* 970GX */
-	bne	2f
-1:	bl	__cpu_preinit_ppc970
-2:
-
-	/* Switch off MMU if not already off */
-	bl	__mmu_off
-	b	__after_prom_start
+	bl	start_initialization_book3s
 #endif /* CONFIG_PPC_BOOK3E_64 */
+	b	__after_prom_start
 
 __REF
 __boot_from_prom:
-- 
2.37.2



* [RFC PATCH 3/9] powerpc/64e: Simplify address calculation in secondary hold loop
  2022-12-27  9:26 [RFC PATCH 0/9] powerpc/64: Build with PC-Relative addressing Nicholas Piggin
  2022-12-27  9:26 ` [RFC PATCH 1/9] crypto: powerpc - Use address generation helper for asm Nicholas Piggin
  2022-12-27  9:26 ` [RFC PATCH 2/9] powerpc/64s: Refactor initialisation after prom Nicholas Piggin
@ 2022-12-27  9:26 ` Nicholas Piggin
  2022-12-27  9:26 ` [RFC PATCH 4/9] powerpc/64: Move initial base and TOC pointer calculation Nicholas Piggin
                   ` (5 subsequent siblings)
  8 siblings, 0 replies; 10+ messages in thread
From: Nicholas Piggin @ 2022-12-27  9:26 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Piggin, Alan Modra

As the earlier comment explains, __secondary_hold_spinloop does not
have to be accessed at its virtual address, so the address calculation
can be slightly simplified.
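
For reference, ABS_ADDR() yields a symbol's absolute (link-time)
address within one of the fixed head sections. A hedged sketch of its
shape follows; the real definition in
arch/powerpc/include/asm/head-64.h is authoritative:

	/* sketch only: absolute address of label inside fixed section sname */
	#define ABS_ADDR(label, sname) \
		(label - start_ ## sname + sname ## _start)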

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kernel/head_64.S | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 5af2e473b195..3a7266fa8a18 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -160,12 +160,8 @@ __secondary_hold:
 	std	r24,(ABS_ADDR(__secondary_hold_acknowledge, first_256B))(0)
 	sync
 
-	li	r26,0
-#ifdef CONFIG_PPC_BOOK3E_64
-	tovirt(r26,r26)
-#endif
 	/* All secondary cpus wait here until told to start. */
-100:	ld	r12,(ABS_ADDR(__secondary_hold_spinloop, first_256B))(r26)
+100:	ld	r12,(ABS_ADDR(__secondary_hold_spinloop, first_256B))(0)
 	cmpdi	0,r12,0
 	beq	100b
 
-- 
2.37.2



* [RFC PATCH 4/9] powerpc/64: Move initial base and TOC pointer calculation
  2022-12-27  9:26 [RFC PATCH 0/9] powerpc/64: Build with PC-Relative addressing Nicholas Piggin
                   ` (2 preceding siblings ...)
  2022-12-27  9:26 ` [RFC PATCH 3/9] powerpc/64e: Simplify address calculation in secondary hold loop Nicholas Piggin
@ 2022-12-27  9:26 ` Nicholas Piggin
  2022-12-27  9:26 ` [RFC PATCH 5/9] powerpc/64s: Run at the kernel virtual address earlier in boot Nicholas Piggin
                   ` (4 subsequent siblings)
  8 siblings, 0 replies; 10+ messages in thread
From: Nicholas Piggin @ 2022-12-27  9:26 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Piggin, Alan Modra

A later change moves the non-prom case to run at the virtual address
earlier, which calls for a virtual TOC pointer and kernel base address.
Split these two calculations between the prom and non-prom cases to
make that change simpler.
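
For context, the "bcl 20,31,$+4; mflr" pair being moved is the usual
position-discovery idiom: a branch-and-link that is always taken,
whose (20,31) hint tells the CPU not to push its link stack, leaving
the address of the following instruction in LR. A hedged C rendering
of the same idiom (illustrative only, not a kernel helper; the boot
code does this in raw asm because no stack or TOC exists yet):

	static inline unsigned long runtime_addr_here(void)
	{
		unsigned long addr;

		/* LR receives the address of the mflr that follows */
		asm volatile("bcl 20,31,$+4; mflr %0" : "=r" (addr) : : "lr");
		return addr;
	}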

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kernel/head_64.S | 28 +++++++++++++++++++---------
 1 file changed, 19 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 3a7266fa8a18..63f3b9b3cf7e 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -514,15 +514,6 @@ __start_initialization_multiplatform:
 	/* Zero r13 (paca) so early program check / mce don't use it */
 	li	r13,0
 
-	/* Get TOC pointer (current runtime address) */
-	bl	relative_toc
-
-	/* find out where we are now */
-	bcl	20,31,$+4
-0:	mflr	r26			/* r26 = runtime addr here */
-	addis	r26,r26,(_stext - 0b)@ha
-	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */
-
 	/*
 	 * Are we booted from a PROM Of-type client-interface ?
 	 */
@@ -544,11 +535,30 @@ __start_initialization_multiplatform:
 #else
 	bl	start_initialization_book3s
 #endif /* CONFIG_PPC_BOOK3E_64 */
+
+	/* Get TOC pointer */
+	bl	relative_toc
+
+	/* find out where we are now */
+	bcl	20,31,$+4
+0:	mflr	r26			/* r26 = runtime addr here */
+	addis	r26,r26,(_stext - 0b)@ha
+	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */
+
 	b	__after_prom_start
 
 __REF
 __boot_from_prom:
 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
+	/* Get TOC pointer */
+	bl	relative_toc
+
+	/* find out where we are now */
+	bcl	20,31,$+4
+0:	mflr	r26			/* r26 = runtime addr here */
+	addis	r26,r26,(_stext - 0b)@ha
+	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */
+
 	/* Save parameters */
 	mr	r31,r3
 	mr	r30,r4
-- 
2.37.2



* [RFC PATCH 5/9] powerpc/64s: Run at the kernel virtual address earlier in boot
  2022-12-27  9:26 [RFC PATCH 0/9] powerpc/64: Build with PC-Relative addressing Nicholas Piggin
                   ` (3 preceding siblings ...)
  2022-12-27  9:26 ` [RFC PATCH 4/9] powerpc/64: Move initial base and TOC pointer calculation Nicholas Piggin
@ 2022-12-27  9:26 ` Nicholas Piggin
  2022-12-27  9:26 ` [RFC PATCH 6/9] powerpc: add CFUNC assembly label annotation Nicholas Piggin
                   ` (3 subsequent siblings)
  8 siblings, 0 replies; 10+ messages in thread
From: Nicholas Piggin @ 2022-12-27  9:26 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Piggin, Alan Modra

This mostly consolidates the Book3E and Book3S behaviour in boot with
respect to executing from the physical or virtual address.

Book3E sets up the kernel virtual linear map in
start_initialization_book3e and runs from the virtual linear alias
after that. This change makes Book3S begin to execute from the virtual
alias at the same point. Book3S cannot use its MMU for that this early,
but with the MMU disabled the virtual linear address correctly aliases
to physical memory because the top bits of the address are ignored.

Secondaries execute from the virtual address similarly early.

This reduces the differences between subarchs, but the main motivation
was to enable the PC-relative addressing ABI for Book3S, where pointer
calculations must execute from the virtual address or the top bits of
the pointer will be lost. This is similar to the existing requirement
with TOC-relative addressing that the TOC pointer use its virtual
address.

XXX: I expect this to blow up everywhere; I've not tested a huge range
of platforms, and mostly only in QEMU.
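
A hedged illustration of the aliasing this relies on (PAGE_OFFSET
value per the usual Book3S-64 linear map; asm/page.h and asm/ppc_asm.h
hold the real definitions): tovirt()/tophys() amount to
adding/subtracting PAGE_OFFSET, and in real mode the hardware ignores
the top bits of the effective address, so the virtual alias reaches
the same physical memory.

	/* sketch only: linear-map aliasing arithmetic */
	#define PAGE_OFFSET	0xc000000000000000UL
	#define tovirt(pa)	((pa) + PAGE_OFFSET)	/* asm form takes (dst, src) regs */
	#define tophys(va)	((va) - PAGE_OFFSET)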

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kernel/head_64.S | 83 +++++++++++++++++++----------------
 1 file changed, 45 insertions(+), 38 deletions(-)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 63f3b9b3cf7e..33a5fbfdc180 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -76,6 +76,13 @@
  *   2. The kernel is entered at __start
  */
 
+/*
+ * boot_from_prom and prom_init run at the physical address. Everything
+ * after prom and kexec entry run at the virtual address (PAGE_OFFSET).
+ * Secondaries run at the virtual address from generic_secondary_common_init
+ * onward.
+ */
+
 OPEN_FIXED_SECTION(first_256B, 0x0, 0x100)
 USE_FIXED_SECTION(first_256B)
 	/*
@@ -303,13 +310,11 @@ _GLOBAL(fsl_secondary_thread_init)
 	/* turn on 64-bit mode */
 	bl	enable_64b_mode
 
-	/* get a valid TOC pointer, wherever we're mapped at */
-	bl	relative_toc
-	tovirt(r2,r2)
-
 	/* Book3E initialization */
 	mr	r3,r24
 	bl	book3e_secondary_thread_init
+	bl	relative_toc
+
 	b	generic_secondary_common_init
 
 #endif /* CONFIG_PPC_BOOK3E_64 */
@@ -331,16 +336,12 @@ _GLOBAL(generic_secondary_smp_init)
 	/* turn on 64-bit mode */
 	bl	enable_64b_mode
 
-	/* get a valid TOC pointer, wherever we're mapped at */
-	bl	relative_toc
-	tovirt(r2,r2)
-
 #ifdef CONFIG_PPC_BOOK3E_64
 	/* Book3E initialization */
 	mr	r3,r24
 	mr	r4,r25
 	bl	book3e_secondary_core_init
-
+	/* Now NIA and r2 are relocated to PAGE_OFFSET if not already */
 /*
  * After common core init has finished, check if the current thread is the
  * one we wanted to boot. If not, start the specified thread and stop the
@@ -378,6 +379,16 @@ _GLOBAL(generic_secondary_smp_init)
 10:
 	b	10b
 20:
+#else
+	/* Now the MMU is off, can branch to our PAGE_OFFSET address */
+	bcl	20,31,$+4
+1:	mflr	r11
+	addi	r11,r11,(2f - 1b)
+	tovirt(r11, r11)
+	mtctr	r11
+	bctr
+2:
+	bl	relative_toc
 #endif
 
 generic_secondary_common_init:
@@ -492,6 +503,8 @@ start_initialization_book3s:
 	/* Switch off MMU if not already off */
 	bl	__mmu_off
 
+	/* Now the MMU is off, can return to our PAGE_OFFSET address */
+	tovirt(r25,r25)
 	mtlr	r25
 	blr
 #endif
@@ -530,16 +543,19 @@ __start_initialization_multiplatform:
 	mr	r29,r9
 #endif
 
+	/* These functions return to the virtual (PAGE_OFFSET) address */
 #ifdef CONFIG_PPC_BOOK3E_64
 	bl	start_initialization_book3e
 #else
 	bl	start_initialization_book3s
 #endif /* CONFIG_PPC_BOOK3E_64 */
 
-	/* Get TOC pointer */
+	/* Get TOC pointer, virtual */
 	bl	relative_toc
 
 	/* find out where we are now */
+
+	/* OPAL doesn't pass base address in r4, have to derive it. */
 	bcl	20,31,$+4
 0:	mflr	r26			/* r26 = runtime addr here */
 	addis	r26,r26,(_stext - 0b)@ha
@@ -550,7 +566,7 @@ __start_initialization_multiplatform:
 __REF
 __boot_from_prom:
 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
-	/* Get TOC pointer */
+	/* Get TOC pointer, non-virtual */
 	bl	relative_toc
 
 	/* find out where we are now */
@@ -599,18 +615,11 @@ __boot_from_prom:
 __after_prom_start:
 #ifdef CONFIG_RELOCATABLE
 	/* process relocations for the final address of the kernel */
-	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
-	sldi	r25,r25,32
-#if defined(CONFIG_PPC_BOOK3E_64)
-	tovirt(r26,r26)		/* on booke, we already run at PAGE_OFFSET */
-#endif
 	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
-#if defined(CONFIG_PPC_BOOK3E_64)
-	tophys(r26,r26)
-#endif
 	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
-	bne	1f
-	add	r25,r25,r26
+	mr	r25,r26		/* then use current kernel base */
+	beq	1f
+	LOAD_REG_IMMEDIATE(r25, PAGE_OFFSET) /* else use static kernel base */
 1:	mr	r3,r25
 	bl	relocate
 #if defined(CONFIG_PPC_BOOK3E_64)
@@ -626,14 +635,8 @@ __after_prom_start:
  *
  * Note: This process overwrites the OF exception vectors.
  */
-	li	r3,0			/* target addr */
-#ifdef CONFIG_PPC_BOOK3E_64
-	tovirt(r3,r3)		/* on booke, we already run at PAGE_OFFSET */
-#endif
+	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET)
 	mr.	r4,r26			/* In some cases the loader may  */
-#if defined(CONFIG_PPC_BOOK3E_64)
-	tovirt(r4,r4)
-#endif
 	beq	9f			/* have already put us at zero */
 	li	r6,0x100		/* Start offset, the first 0x100 */
 					/* bytes were copied earlier.	 */
@@ -644,9 +647,6 @@ __after_prom_start:
  * variable __run_at_load, if it is set the kernel is treated as relocatable
  * kernel, otherwise it will be moved to PHYSICAL_START
  */
-#if defined(CONFIG_PPC_BOOK3E_64)
-	tovirt(r26,r26)		/* on booke, we already run at PAGE_OFFSET */
-#endif
 	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
 	cmplwi	cr0,r7,1
 	bne	3f
@@ -686,6 +686,7 @@ p_end: .8byte _end - copy_to_here
 	addis   r8,r26,(ABS_ADDR(p_end, text))@ha
 	ld      r8,(ABS_ADDR(p_end, text))@l(r8)
 	add	r5,r5,r8
+
 5:	bl	copy_and_flush		/* copy the rest */
 
 9:	b	start_here_multiplatform
@@ -765,9 +766,15 @@ _GLOBAL(pmac_secondary_start)
 	sync
 	slbia
 
-	/* get TOC pointer (real address) */
+	/* Branch to our PAGE_OFFSET address */
+	bcl	20,31,$+4
+1:	mflr	r11
+	addi	r11,r11,(2f - 1b)
+	tovirt(r11, r11)
+	mtctr	r11
+	bctr
+2:
 	bl	relative_toc
-	tovirt(r2,r2)
 
 	/* Copy some CPU settings from CPU 0 */
 	bl	__restore_cpu_ppc970
@@ -906,8 +913,9 @@ SYM_FUNC_END(enable_64b_mode)
  * TOC in -mcmodel=medium mode. After we relocate to 0 but before
  * the MMU is on we need our TOC to be a virtual address otherwise
  * these pointers will be real addresses which may get stored and
- * accessed later with the MMU on. We use tovirt() at the call
- * sites to handle this.
+ * accessed later with the MMU on. We branch to the virtual address
+ * while still in real mode then call relative_toc again to handle
+ * this.
  */
 _GLOBAL(relative_toc)
 	mflr	r0
@@ -926,9 +934,8 @@ p_toc:	.8byte	.TOC. - 0b
  */
 __REF
 start_here_multiplatform:
-	/* set up the TOC */
-	bl      relative_toc
-	tovirt(r2,r2)
+	/* Adjust the TOC for moved kernel (XXX why not readjust where we move it?) */
+	bl	relative_toc
 
 	/* Clear out the BSS. It may have been done in prom_init,
 	 * already but that's irrelevant since prom_init will soon
-- 
2.37.2



* [RFC PATCH 6/9] powerpc: add CFUNC assembly label annotation
  2022-12-27  9:26 [RFC PATCH 0/9] powerpc/64: Build with PC-Relative addressing Nicholas Piggin
                   ` (4 preceding siblings ...)
  2022-12-27  9:26 ` [RFC PATCH 5/9] powerpc/64s: Run at the kernel virtual address earlier in boot Nicholas Piggin
@ 2022-12-27  9:26 ` Nicholas Piggin
  2022-12-27  9:26 ` [RFC PATCH 7/9] powerpc/64: Add support to build with prefixed instructions Nicholas Piggin
                   ` (2 subsequent siblings)
  8 siblings, 0 replies; 10+ messages in thread
From: Nicholas Piggin @ 2022-12-27  9:26 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Piggin, Alan Modra

This macro is to be used in assembly where C functions are called.
The pcrel addressing mode requires branches to functions with a
localentry value of 1 to have either a trailing nop or @notoc.
This macro permits the latter without changing callers; a sketch of
the expected pcrel definition is shown below.
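
A hedged sketch of the expected pcrel redefinition (this patch only
adds the no-op default; the CONFIG_PPC_KERNEL_PCREL name is an
assumption based on the later patches in the series):

	/* sketch only: suppress the TOC-restore nop on pcrel builds */
	#ifdef CONFIG_PPC_KERNEL_PCREL
	#define CFUNC(name)	name@notoc
	#else
	#define CFUNC(name)	name
	#endif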

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/include/asm/ppc_asm.h      |   5 ++
 arch/powerpc/kernel/exceptions-64s.S    | 112 ++++++++++++------------
 arch/powerpc/kernel/head_64.S           |  12 +--
 arch/powerpc/kernel/interrupt_64.S      |  28 +++---
 arch/powerpc/kernel/misc_64.S           |   2 +-
 arch/powerpc/kernel/vdso/gettimeofday.S |   6 +-
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |  16 ++--
 arch/powerpc/lib/copypage_power7.S      |   4 +-
 arch/powerpc/lib/copyuser_power7.S      |   8 +-
 arch/powerpc/lib/hweight_64.S           |   8 +-
 arch/powerpc/lib/memcmp_64.S            |   4 +-
 arch/powerpc/lib/memcpy_power7.S        |   6 +-
 arch/powerpc/platforms/pseries/hvCall.S |   4 +-
 13 files changed, 112 insertions(+), 103 deletions(-)

diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index d2f44612f4b0..9f64f9a6a897 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -180,6 +180,11 @@
 
 #ifdef __KERNEL__
 
+/*
+ * Used to name C functions called from asm
+ */
+#define CFUNC(name) name
+
 /*
  * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit
  * version below in the else case of the ifdef.
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 6441a1ba57ac..c33c8ebf8641 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1075,7 +1075,7 @@ EXC_COMMON_BEGIN(system_reset_common)
 	__GEN_COMMON_BODY system_reset
 
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	system_reset_exception
+	bl	CFUNC(system_reset_exception)
 
 	/* Clear MSR_RI before setting SRR0 and SRR1. */
 	li	r9,0
@@ -1223,9 +1223,9 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 	addi	r3,r1,STACK_INT_FRAME_REGS
 BEGIN_FTR_SECTION
-	bl	machine_check_early_boot
+	bl	CFUNC(machine_check_early_boot)
 END_FTR_SECTION(0, 1)     // nop out after boot
-	bl	machine_check_early
+	bl	CFUNC(machine_check_early)
 	std	r3,RESULT(r1)	/* Save result */
 	ld	r12,_MSR(r1)
 
@@ -1286,7 +1286,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 	 * Queue up the MCE event so that we can log it later, while
 	 * returning from kernel or opal call.
 	 */
-	bl	machine_check_queue_event
+	bl	CFUNC(machine_check_queue_event)
 	MACHINE_CHECK_HANDLER_WINDUP
 	RFI_TO_KERNEL
 
@@ -1312,7 +1312,7 @@ EXC_COMMON_BEGIN(machine_check_common)
 	 */
 	GEN_COMMON machine_check
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	machine_check_exception_async
+	bl	CFUNC(machine_check_exception_async)
 	b	interrupt_return_srr
 
 
@@ -1322,7 +1322,7 @@ EXC_COMMON_BEGIN(machine_check_common)
  * done. Queue the event then call the idle code to do the wake up.
  */
 EXC_COMMON_BEGIN(machine_check_idle_common)
-	bl	machine_check_queue_event
+	bl	CFUNC(machine_check_queue_event)
 
 	/*
 	 * GPR-loss wakeups are relatively straightforward, because the
@@ -1361,7 +1361,7 @@ EXC_COMMON_BEGIN(unrecoverable_mce)
 BEGIN_FTR_SECTION
 	li	r10,0 /* clear MSR_RI */
 	mtmsrd	r10,1
-	bl	disable_machine_check
+	bl	CFUNC(disable_machine_check)
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 	ld	r10,PACAKMSR(r13)
 	li	r3,MSR_ME
@@ -1378,14 +1378,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 	 * the early handler which is a true NMI.
 	 */
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	machine_check_exception
+	bl	CFUNC(machine_check_exception)
 
 	/*
 	 * We will not reach here. Even if we did, there is no way out.
 	 * Call unrecoverable_exception and die.
 	 */
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	unrecoverable_exception
+	bl	CFUNC(unrecoverable_exception)
 	b	.
 
 
@@ -1440,16 +1440,16 @@ EXC_COMMON_BEGIN(data_access_common)
 	bne-	1f
 #ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
-	bl	do_hash_fault
+	bl	CFUNC(do_hash_fault)
 MMU_FTR_SECTION_ELSE
-	bl	do_page_fault
+	bl	CFUNC(do_page_fault)
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 #else
-	bl	do_page_fault
+	bl	CFUNC(do_page_fault)
 #endif
 	b	interrupt_return_srr
 
-1:	bl	do_break
+1:	bl	CFUNC(do_break)
 	/*
 	 * do_break() may have changed the NV GPRS while handling a breakpoint.
 	 * If so, we need to restore them with their updated values.
@@ -1493,7 +1493,7 @@ EXC_COMMON_BEGIN(data_access_slb_common)
 BEGIN_MMU_FTR_SECTION
 	/* HPT case, do SLB fault */
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	do_slb_fault
+	bl	CFUNC(do_slb_fault)
 	cmpdi	r3,0
 	bne-	1f
 	b	fast_interrupt_return_srr
@@ -1507,7 +1507,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 #endif
 	std	r3,RESULT(r1)
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	do_bad_segment_interrupt
+	bl	CFUNC(do_bad_segment_interrupt)
 	b	interrupt_return_srr
 
 
@@ -1541,12 +1541,12 @@ EXC_COMMON_BEGIN(instruction_access_common)
 	addi	r3,r1,STACK_INT_FRAME_REGS
 #ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
-	bl	do_hash_fault
+	bl	CFUNC(do_hash_fault)
 MMU_FTR_SECTION_ELSE
-	bl	do_page_fault
+	bl	CFUNC(do_page_fault)
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 #else
-	bl	do_page_fault
+	bl	CFUNC(do_page_fault)
 #endif
 	b	interrupt_return_srr
 
@@ -1581,7 +1581,7 @@ EXC_COMMON_BEGIN(instruction_access_slb_common)
 BEGIN_MMU_FTR_SECTION
 	/* HPT case, do SLB fault */
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	do_slb_fault
+	bl	CFUNC(do_slb_fault)
 	cmpdi	r3,0
 	bne-	1f
 	b	fast_interrupt_return_srr
@@ -1595,7 +1595,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 #endif
 	std	r3,RESULT(r1)
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	do_bad_segment_interrupt
+	bl	CFUNC(do_bad_segment_interrupt)
 	b	interrupt_return_srr
 
 
@@ -1649,7 +1649,7 @@ EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
 EXC_COMMON_BEGIN(hardware_interrupt_common)
 	GEN_COMMON hardware_interrupt
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	do_IRQ
+	bl	CFUNC(do_IRQ)
 	BEGIN_FTR_SECTION
 	b	interrupt_return_hsrr
 	FTR_SECTION_ELSE
@@ -1679,7 +1679,7 @@ EXC_VIRT_END(alignment, 0x4600, 0x100)
 EXC_COMMON_BEGIN(alignment_common)
 	GEN_COMMON alignment
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	alignment_exception
+	bl	CFUNC(alignment_exception)
 	HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
 	b	interrupt_return_srr
 
@@ -1745,7 +1745,7 @@ EXC_COMMON_BEGIN(program_check_common)
 
 .Ldo_program_check:
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	program_check_exception
+	bl	CFUNC(program_check_exception)
 	HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
 	b	interrupt_return_srr
 
@@ -1777,7 +1777,7 @@ EXC_COMMON_BEGIN(fp_unavailable_common)
 	GEN_COMMON fp_unavailable
 	bne	1f			/* if from user, just load it up */
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	kernel_fp_unavailable_exception
+	bl	CFUNC(kernel_fp_unavailable_exception)
 0:	trap
 	EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
 1:
@@ -1790,12 +1790,12 @@ BEGIN_FTR_SECTION
 	bne-	2f
 END_FTR_SECTION_IFSET(CPU_FTR_TM)
 #endif
-	bl	load_up_fpu
+	bl	CFUNC(load_up_fpu)
 	b	fast_interrupt_return_srr
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 2:	/* User process was in a transaction */
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	fp_unavailable_tm
+	bl	CFUNC(fp_unavailable_tm)
 	b	interrupt_return_srr
 #endif
 
@@ -1839,7 +1839,7 @@ EXC_VIRT_END(decrementer, 0x4900, 0x80)
 EXC_COMMON_BEGIN(decrementer_common)
 	GEN_COMMON decrementer
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	timer_interrupt
+	bl	CFUNC(timer_interrupt)
 	b	interrupt_return_srr
 
 
@@ -1925,9 +1925,9 @@ EXC_COMMON_BEGIN(doorbell_super_common)
 	GEN_COMMON doorbell_super
 	addi	r3,r1,STACK_INT_FRAME_REGS
 #ifdef CONFIG_PPC_DOORBELL
-	bl	doorbell_exception
+	bl	CFUNC(doorbell_exception)
 #else
-	bl	unknown_async_exception
+	bl	CFUNC(unknown_async_exception)
 #endif
 	b	interrupt_return_srr
 
@@ -2091,7 +2091,7 @@ EXC_VIRT_END(single_step, 0x4d00, 0x100)
 EXC_COMMON_BEGIN(single_step_common)
 	GEN_COMMON single_step
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	single_step_exception
+	bl	CFUNC(single_step_exception)
 	b	interrupt_return_srr
 
 
@@ -2126,9 +2126,9 @@ EXC_COMMON_BEGIN(h_data_storage_common)
 	GEN_COMMON h_data_storage
 	addi    r3,r1,STACK_INT_FRAME_REGS
 BEGIN_MMU_FTR_SECTION
-	bl      do_bad_page_fault_segv
+	bl	CFUNC(do_bad_page_fault_segv)
 MMU_FTR_SECTION_ELSE
-	bl      unknown_exception
+	bl	CFUNC(unknown_exception)
 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
 	b       interrupt_return_hsrr
 
@@ -2154,7 +2154,7 @@ EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
 EXC_COMMON_BEGIN(h_instr_storage_common)
 	GEN_COMMON h_instr_storage
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	unknown_exception
+	bl	CFUNC(unknown_exception)
 	b	interrupt_return_hsrr
 
 
@@ -2177,7 +2177,7 @@ EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
 EXC_COMMON_BEGIN(emulation_assist_common)
 	GEN_COMMON emulation_assist
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	emulation_assist_interrupt
+	bl	CFUNC(emulation_assist_interrupt)
 	HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
 	b	interrupt_return_hsrr
 
@@ -2237,7 +2237,7 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)
 	__GEN_COMMON_BODY hmi_exception_early
 
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	hmi_exception_realmode
+	bl	CFUNC(hmi_exception_realmode)
 	cmpdi	cr0,r3,0
 	bne	1f
 
@@ -2255,7 +2255,7 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)
 EXC_COMMON_BEGIN(hmi_exception_common)
 	GEN_COMMON hmi_exception
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	handle_hmi_exception
+	bl	CFUNC(handle_hmi_exception)
 	b	interrupt_return_hsrr
 
 
@@ -2290,9 +2290,9 @@ EXC_COMMON_BEGIN(h_doorbell_common)
 	GEN_COMMON h_doorbell
 	addi	r3,r1,STACK_INT_FRAME_REGS
 #ifdef CONFIG_PPC_DOORBELL
-	bl	doorbell_exception
+	bl	CFUNC(doorbell_exception)
 #else
-	bl	unknown_async_exception
+	bl	CFUNC(unknown_async_exception)
 #endif
 	b	interrupt_return_hsrr
 
@@ -2325,7 +2325,7 @@ EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
 EXC_COMMON_BEGIN(h_virt_irq_common)
 	GEN_COMMON h_virt_irq
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	do_IRQ
+	bl	CFUNC(do_IRQ)
 	b	interrupt_return_hsrr
 
 
@@ -2374,10 +2374,10 @@ EXC_COMMON_BEGIN(performance_monitor_common)
 	lbz	r4,PACAIRQSOFTMASK(r13)
 	cmpdi	r4,IRQS_ENABLED
 	bne	1f
-	bl	performance_monitor_exception_async
+	bl	CFUNC(performance_monitor_exception_async)
 	b	interrupt_return_srr
 1:
-	bl	performance_monitor_exception_nmi
+	bl	CFUNC(performance_monitor_exception_nmi)
 	/* Clear MSR_RI before setting SRR0 and SRR1. */
 	li	r9,0
 	mtmsrd	r9,1
@@ -2421,19 +2421,19 @@ BEGIN_FTR_SECTION
 	bne-	2f
   END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
 #endif
-	bl	load_up_altivec
+	bl	CFUNC(load_up_altivec)
 	b	fast_interrupt_return_srr
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 2:	/* User process was in a transaction */
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	altivec_unavailable_tm
+	bl	CFUNC(altivec_unavailable_tm)
 	b	interrupt_return_srr
 #endif
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	altivec_unavailable_exception
+	bl	CFUNC(altivec_unavailable_exception)
 	b	interrupt_return_srr
 
 
@@ -2475,14 +2475,14 @@ BEGIN_FTR_SECTION
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 2:	/* User process was in a transaction */
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	vsx_unavailable_tm
+	bl	CFUNC(vsx_unavailable_tm)
 	b	interrupt_return_srr
 #endif
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	vsx_unavailable_exception
+	bl	CFUNC(vsx_unavailable_exception)
 	b	interrupt_return_srr
 
 
@@ -2509,7 +2509,7 @@ EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
 EXC_COMMON_BEGIN(facility_unavailable_common)
 	GEN_COMMON facility_unavailable
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	facility_unavailable_exception
+	bl	CFUNC(facility_unavailable_exception)
 	HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
 	b	interrupt_return_srr
 
@@ -2537,7 +2537,7 @@ EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
 EXC_COMMON_BEGIN(h_facility_unavailable_common)
 	GEN_COMMON h_facility_unavailable
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	facility_unavailable_exception
+	bl	CFUNC(facility_unavailable_exception)
 	/* XXX Shouldn't be necessary in practice */
 	HANDLER_RESTORE_NVGPRS()
 	b	interrupt_return_hsrr
@@ -2568,7 +2568,7 @@ EXC_VIRT_NONE(0x5200, 0x100)
 EXC_COMMON_BEGIN(cbe_system_error_common)
 	GEN_COMMON cbe_system_error
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	cbe_system_error_exception
+	bl	CFUNC(cbe_system_error_exception)
 	b	interrupt_return_hsrr
 
 #else /* CONFIG_CBE_RAS */
@@ -2599,7 +2599,7 @@ EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
 EXC_COMMON_BEGIN(instruction_breakpoint_common)
 	GEN_COMMON instruction_breakpoint
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	instruction_breakpoint_exception
+	bl	CFUNC(instruction_breakpoint_exception)
 	b	interrupt_return_srr
 
 
@@ -2721,7 +2721,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 EXC_COMMON_BEGIN(denorm_exception_common)
 	GEN_COMMON denorm_exception
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	unknown_exception
+	bl	CFUNC(unknown_exception)
 	b	interrupt_return_hsrr
 
 
@@ -2738,7 +2738,7 @@ EXC_VIRT_NONE(0x5600, 0x100)
 EXC_COMMON_BEGIN(cbe_maintenance_common)
 	GEN_COMMON cbe_maintenance
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	cbe_maintenance_exception
+	bl	CFUNC(cbe_maintenance_exception)
 	b	interrupt_return_hsrr
 
 #else /* CONFIG_CBE_RAS */
@@ -2764,10 +2764,10 @@ EXC_COMMON_BEGIN(altivec_assist_common)
 	GEN_COMMON altivec_assist
 	addi	r3,r1,STACK_INT_FRAME_REGS
 #ifdef CONFIG_ALTIVEC
-	bl	altivec_assist_exception
+	bl	CFUNC(altivec_assist_exception)
 	HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
 #else
-	bl	unknown_exception
+	bl	CFUNC(unknown_exception)
 #endif
 	b	interrupt_return_srr
 
@@ -2785,7 +2785,7 @@ EXC_VIRT_NONE(0x5800, 0x100)
 EXC_COMMON_BEGIN(cbe_thermal_common)
 	GEN_COMMON cbe_thermal
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	cbe_thermal_exception
+	bl	CFUNC(cbe_thermal_exception)
 	b	interrupt_return_hsrr
 
 #else /* CONFIG_CBE_RAS */
@@ -2818,7 +2818,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
 	__GEN_COMMON_BODY soft_nmi
 
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	soft_nmi_interrupt
+	bl	CFUNC(soft_nmi_interrupt)
 
 	/* Clear MSR_RI before setting SRR0 and SRR1. */
 	li	r9,0
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 33a5fbfdc180..70e8d653657c 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -604,7 +604,7 @@ __boot_from_prom:
 
 	/* Do all of the interaction with OF client interface */
 	mr	r8,r26
-	bl	prom_init
+	bl	CFUNC(prom_init)
 #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */
 
 	/* We never return. We also hit that trap if trying to boot
@@ -833,7 +833,7 @@ __secondary_start:
 	 * can turn it on below. This is a call to C, which is OK, we're still
 	 * running on the emergency stack.
 	 */
-	bl	early_setup_secondary
+	bl	CFUNC(early_setup_secondary)
 
 	/*
 	 * The primary has initialized our kernel stack for us in the paca, grab
@@ -872,7 +872,7 @@ start_secondary_prolog:
 	LOAD_PACA_TOC()
 	li	r3,0
 	std	r3,0(r1)		/* Zero the stack frame pointer	*/
-	bl	start_secondary
+	bl	CFUNC(start_secondary)
 	b	.
 /*
  * Reset stack pointer and call start_secondary
@@ -883,7 +883,7 @@ _GLOBAL(start_secondary_resume)
 	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer */
 	li	r3,0
 	std	r3,0(r1)		/* Zero the stack frame pointer	*/
-	bl	start_secondary
+	bl	CFUNC(start_secondary)
 	b	.
 #endif
 
@@ -988,7 +988,7 @@ start_here_multiplatform:
 	 */
 
 #ifdef CONFIG_KASAN
-	bl	kasan_early_init
+	bl	CFUNC(kasan_early_init)
 #endif
 	/* Restore parameters passed from prom_init/kexec */
 	mr	r3,r31
@@ -1021,7 +1021,7 @@ start_here_common:
 	stb	r0,PACAIRQHAPPENED(r13)
 
 	/* Generic kernel entry */
-	bl	start_kernel
+	bl	CFUNC(start_kernel)
 
 	/* Not reached */
 0:	trap
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index fccc34489add..23d7cdc5a23e 100644
--- a/arch/powerpc/kernel/interrupt_64.S
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -101,12 +101,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	 * state of kernel code.
 	 */
 	SANITIZE_SYSCALL_GPRS()
-	bl	system_call_exception
+	bl	CFUNC(system_call_exception)
 
 .Lsyscall_vectored_\name\()_exit:
 	addi	r4,r1,STACK_INT_FRAME_REGS
 	li	r5,1 /* scv */
-	bl	syscall_exit_prepare
+	bl	CFUNC(syscall_exit_prepare)
 	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
 .Lsyscall_vectored_\name\()_rst_start:
 	lbz	r11,PACAIRQHAPPENED(r13)
@@ -185,7 +185,7 @@ _ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
 	addi	r4,r1,STACK_INT_FRAME_REGS
 	li	r11,IRQS_ALL_DISABLED
 	stb	r11,PACAIRQSOFTMASK(r13)
-	bl	syscall_exit_restart
+	bl	CFUNC(syscall_exit_restart)
 	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
 	b	.Lsyscall_vectored_\name\()_rst_start
 1:
@@ -286,12 +286,12 @@ END_BTB_FLUSH_SECTION
 	 * state of kernel code.
 	 */
 	SANITIZE_SYSCALL_GPRS()
-	bl	system_call_exception
+	bl	CFUNC(system_call_exception)
 
 .Lsyscall_exit:
 	addi	r4,r1,STACK_INT_FRAME_REGS
 	li	r5,0 /* !scv */
-	bl	syscall_exit_prepare
+	bl	CFUNC(syscall_exit_prepare)
 	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
 #ifdef CONFIG_PPC_BOOK3S
 .Lsyscall_rst_start:
@@ -372,7 +372,7 @@ _ASM_NOKPROBE_SYMBOL(syscall_restart)
 	addi	r4,r1,STACK_INT_FRAME_REGS
 	li	r11,IRQS_ALL_DISABLED
 	stb	r11,PACAIRQSOFTMASK(r13)
-	bl	syscall_exit_restart
+	bl	CFUNC(syscall_exit_restart)
 	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
 	b	.Lsyscall_rst_start
 1:
@@ -401,7 +401,7 @@ _ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
 	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
 	bne+	.Lfast_kernel_interrupt_return_srr
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	unrecoverable_exception
+	bl	CFUNC(unrecoverable_exception)
 	b	. /* should not get here */
 #else
 	bne	.Lfast_user_interrupt_return_srr
@@ -419,7 +419,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
 interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	interrupt_exit_user_prepare
+	bl	CFUNC(interrupt_exit_user_prepare)
 #ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS
 	cmpdi	r3,0
 	bne-	.Lrestore_nvgprs_\srr
@@ -523,7 +523,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
 	addi	r3,r1,STACK_INT_FRAME_REGS
 	li	r11,IRQS_ALL_DISABLED
 	stb	r11,PACAIRQSOFTMASK(r13)
-	bl	interrupt_exit_user_restart
+	bl	CFUNC(interrupt_exit_user_restart)
 	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
 	b	.Linterrupt_return_\srr\()_user_rst_start
 1:
@@ -536,7 +536,7 @@ RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr
 interrupt_return_\srr\()_kernel:
 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
 	addi	r3,r1,STACK_INT_FRAME_REGS
-	bl	interrupt_exit_kernel_prepare
+	bl	CFUNC(interrupt_exit_kernel_prepare)
 
 	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
 .Linterrupt_return_\srr\()_kernel_rst_start:
@@ -705,7 +705,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
 	addi	r3,r1,STACK_INT_FRAME_REGS
 	li	r11,IRQS_ALL_DISABLED
 	stb	r11,PACAIRQSOFTMASK(r13)
-	bl	interrupt_exit_kernel_restart
+	bl	CFUNC(interrupt_exit_kernel_restart)
 	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
 	b	.Linterrupt_return_\srr\()_kernel_rst_start
 1:
@@ -727,20 +727,20 @@ DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
 
 #ifdef CONFIG_PPC_BOOK3S
 _GLOBAL(ret_from_fork_scv)
-	bl	schedule_tail
+	bl	CFUNC(schedule_tail)
 	REST_NVGPRS(r1)
 	li	r3,0	/* fork() return value */
 	b	.Lsyscall_vectored_common_exit
 #endif
 
 _GLOBAL(ret_from_fork)
-	bl	schedule_tail
+	bl	CFUNC(schedule_tail)
 	REST_NVGPRS(r1)
 	li	r3,0	/* fork() return value */
 	b	.Lsyscall_exit
 
 _GLOBAL(ret_from_kernel_thread)
-	bl	schedule_tail
+	bl	CFUNC(schedule_tail)
 	REST_NVGPRS(r1)
 	mtctr	r14
 	mr	r3,r15
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index c39c07a4c06e..2c9ac70aaf0c 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -432,7 +432,7 @@ _GLOBAL(kexec_sequence)
 1:
 	/* copy dest pages, flush whole dest image */
 	mr	r3,r29
-	bl	kexec_copy_flush	/* (image) */
+	bl	CFUNC(kexec_copy_flush)	/* (image) */
 
 	/* turn off mmu now if not done earlier */
 	cmpdi	r26,0
diff --git a/arch/powerpc/kernel/vdso/gettimeofday.S b/arch/powerpc/kernel/vdso/gettimeofday.S
index 0c4ecc8fec5a..48fc6658053a 100644
--- a/arch/powerpc/kernel/vdso/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso/gettimeofday.S
@@ -38,7 +38,11 @@
 	.else
 	addi		r4, r5, VDSO_DATA_OFFSET
 	.endif
-	bl		DOTSYM(\funct)
+#ifdef __powerpc64__
+	bl		CFUNC(DOTSYM(\funct))
+#else
+	bl		\funct
+#endif
 	PPC_LL		r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
 #ifdef __powerpc64__
 	PPC_LL		r2, PPC_MIN_STKFRM + STK_GOT(r1)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index acf80915f406..3eab313843b7 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -381,7 +381,7 @@ kvm_secondary_got_guest:
 	bne	kvm_no_guest
 
 	li	r3,0			/* NULL argument */
-	bl	hmi_exception_realmode
+	bl	CFUNC(hmi_exception_realmode)
 /*
  * At this point we have finished executing in the guest.
  * We need to wait for hwthread_req to become zero, since
@@ -458,7 +458,7 @@ kvm_unsplit_nap:
 	cmpwi	r12, BOOK3S_INTERRUPT_HMI
 	bne	55f
 	li	r3, 0			/* NULL argument */
-	bl	hmi_exception_realmode
+	bl	CFUNC(hmi_exception_realmode)
 55:
 	/*
 	 * Ensure that secondary doesn't nap when it has
@@ -859,7 +859,7 @@ deliver_guest_interrupt:	/* r4 = vcpu, r13 = paca */
 	cmpdi	r0, 0
 	beq	71f
 	mr	r3, r4
-	bl	kvmppc_guest_entry_inject_int
+	bl	CFUNC(kvmppc_guest_entry_inject_int)
 	ld	r4, HSTATE_KVM_VCPU(r13)
 71:
 	ld	r6, VCPU_SRR0(r4)
@@ -1544,7 +1544,7 @@ kvmppc_guest_external:
 	/* External interrupt, first check for host_ipi. If this is
 	 * set, we know the host wants us out so let's do it now
 	 */
-	bl	kvmppc_read_intr
+	bl	CFUNC(kvmppc_read_intr)
 
 	/*
 	 * Restore the active volatile registers after returning from
@@ -1626,7 +1626,7 @@ kvmppc_hdsi:
 	/* Search the hash table. */
 	mr	r3, r9			/* vcpu pointer */
 	li	r7, 1			/* data fault */
-	bl	kvmppc_hpte_hv_fault
+	bl	CFUNC(kvmppc_hpte_hv_fault)
 	ld	r9, HSTATE_KVM_VCPU(r13)
 	ld	r10, VCPU_PC(r9)
 	ld	r11, VCPU_MSR(r9)
@@ -1702,7 +1702,7 @@ kvmppc_hisi:
 	mr	r4, r10
 	mr	r6, r11
 	li	r7, 0			/* instruction fault */
-	bl	kvmppc_hpte_hv_fault
+	bl	CFUNC(kvmppc_hpte_hv_fault)
 	ld	r9, HSTATE_KVM_VCPU(r13)
 	ld	r10, VCPU_PC(r9)
 	ld	r11, VCPU_MSR(r9)
@@ -2342,7 +2342,7 @@ hmi_realmode:
 	lbz	r0, HSTATE_PTID(r13)
 	cmpwi	r0, 0
 	bne	guest_exit_cont
-	bl	kvmppc_realmode_hmi_handler
+	bl	CFUNC(kvmppc_realmode_hmi_handler)
 	ld	r9, HSTATE_KVM_VCPU(r13)
 	li	r12, BOOK3S_INTERRUPT_HMI
 	b	guest_exit_cont
@@ -2413,7 +2413,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 7:	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
 	stdu	r1, -PPC_MIN_STKFRM(r1)
-	bl	kvmppc_read_intr
+	bl	CFUNC(kvmppc_read_intr)
 	nop
 	li	r12, BOOK3S_INTERRUPT_EXTERNAL
 	cmpdi	r3, 1
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index a9844c6353cf..a783973f1215 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -45,7 +45,7 @@ _GLOBAL(copypage_power7)
 	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	enter_vmx_ops
+	bl	CFUNC(enter_vmx_ops)
 	cmpwi	r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
 	ld	r3,STK_REG(R31)(r1)
@@ -88,7 +88,7 @@ _GLOBAL(copypage_power7)
 	addi	r3,r3,128
 	bdnz	1b
 
-	b	exit_vmx_ops		/* tail call optimise */
+	b	CFUNC(exit_vmx_ops)		/* tail call optimise */
 
 #else
 	li	r0,(PAGE_SIZE/128)
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 28f0be523c06..ac41053c3a5a 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -47,7 +47,7 @@
 	ld	r15,STK_REG(R15)(r1)
 	ld	r14,STK_REG(R14)(r1)
 .Ldo_err3:
-	bl	exit_vmx_usercopy
+	bl	CFUNC(exit_vmx_usercopy)
 	ld	r0,STACKFRAMESIZE+16(r1)
 	mtlr	r0
 	b	.Lexit
@@ -272,7 +272,7 @@ err1;	stb	r0,0(r3)
 	mflr	r0
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	enter_vmx_usercopy
+	bl	CFUNC(enter_vmx_usercopy)
 	cmpwi	cr1,r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
 	ld	r3,STK_REG(R31)(r1)
@@ -488,7 +488,7 @@ err3;	lbz	r0,0(r4)
 err3;	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	b	exit_vmx_usercopy	/* tail call optimise */
+	b	CFUNC(exit_vmx_usercopy)	/* tail call optimise */
 
 .Lvmx_unaligned_copy:
 	/* Get the destination 16B aligned */
@@ -691,5 +691,5 @@ err3;	lbz	r0,0(r4)
 err3;	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	b	exit_vmx_usercopy	/* tail call optimise */
+	b	CFUNC(exit_vmx_usercopy)	/* tail call optimise */
 #endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S
index 6effad901ef7..09af29561314 100644
--- a/arch/powerpc/lib/hweight_64.S
+++ b/arch/powerpc/lib/hweight_64.S
@@ -14,7 +14,7 @@
 
 _GLOBAL(__arch_hweight8)
 BEGIN_FTR_SECTION
-	b __sw_hweight8
+	b CFUNC(__sw_hweight8)
 	nop
 	nop
 FTR_SECTION_ELSE
@@ -26,7 +26,7 @@ EXPORT_SYMBOL(__arch_hweight8)
 
 _GLOBAL(__arch_hweight16)
 BEGIN_FTR_SECTION
-	b __sw_hweight16
+	b CFUNC(__sw_hweight16)
 	nop
 	nop
 	nop
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(__arch_hweight16)
 
 _GLOBAL(__arch_hweight32)
 BEGIN_FTR_SECTION
-	b __sw_hweight32
+	b CFUNC(__sw_hweight32)
 	nop
 	nop
 	nop
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(__arch_hweight32)
 
 _GLOBAL(__arch_hweight64)
 BEGIN_FTR_SECTION
-	b __sw_hweight64
+	b CFUNC(__sw_hweight64)
 	nop
 	nop
 	nop
diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
index 384218df71ba..0b9b1685a33d 100644
--- a/arch/powerpc/lib/memcmp_64.S
+++ b/arch/powerpc/lib/memcmp_64.S
@@ -44,7 +44,7 @@
 	std     r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
 	std     r0,16(r1); \
 	stdu    r1,-STACKFRAMESIZE(r1); \
-	bl      enter_vmx_ops; \
+	bl      CFUNC(enter_vmx_ops); \
 	cmpwi   cr1,r3,0; \
 	ld      r0,STACKFRAMESIZE+16(r1); \
 	ld      r3,STK_REG(R31)(r1); \
@@ -60,7 +60,7 @@
 	std     r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
 	std     r0,16(r1); \
 	stdu    r1,-STACKFRAMESIZE(r1); \
-	bl      exit_vmx_ops; \
+	bl      CFUNC(exit_vmx_ops); \
 	ld      r0,STACKFRAMESIZE+16(r1); \
 	ld      r3,STK_REG(R31)(r1); \
 	ld      r4,STK_REG(R30)(r1); \
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 54f226333c94..9398b2b746c4 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -218,7 +218,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	enter_vmx_ops
+	bl	CFUNC(enter_vmx_ops)
 	cmpwi	cr1,r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
 	ld	r3,STK_REG(R31)(r1)
@@ -433,7 +433,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 
 15:	addi	r1,r1,STACKFRAMESIZE
 	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
-	b	exit_vmx_ops		/* tail call optimise */
+	b	CFUNC(exit_vmx_ops)		/* tail call optimise */
 
 .Lvmx_unaligned_copy:
 	/* Get the destination 16B aligned */
@@ -637,5 +637,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 
 15:	addi	r1,r1,STACKFRAMESIZE
 	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
-	b	exit_vmx_ops		/* tail call optimise */
+	b	CFUNC(exit_vmx_ops)		/* tail call optimise */
 #endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 783c16ad648b..35254ac7af5e 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -44,7 +44,7 @@ hcall_tracepoint_refcount:
 	std	r0,16(r1);					\
 	addi	r4,r1,STK_PARAM(FIRST_REG);			\
 	stdu	r1,-STACK_FRAME_MIN_SIZE(r1);			\
-	bl	__trace_hcall_entry;				\
+	bl	CFUNC(__trace_hcall_entry);			\
 	ld	r3,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1);	\
 	ld	r4,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1);	\
 	ld	r5,STACK_FRAME_MIN_SIZE+STK_PARAM(R5)(r1);	\
@@ -63,7 +63,7 @@ hcall_tracepoint_refcount:
 	std	r3,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1);	\
 	mr	r4,r3;						\
 	mr	r3,r0;						\
-	bl	__trace_hcall_exit;				\
+	bl	CFUNC(__trace_hcall_exit);			\
 	ld	r0,STACK_FRAME_MIN_SIZE+16(r1);			\
 	addi	r1,r1,STACK_FRAME_MIN_SIZE;			\
 	ld	r3,STK_PARAM(R3)(r1);				\
-- 
2.37.2



* [RFC PATCH 7/9] powerpc/64: Add support to build with prefixed instructions
  2022-12-27  9:26 [RFC PATCH 0/9] powerpc/64: Build with PC-Relative addressing Nicholas Piggin
                   ` (5 preceding siblings ...)
  2022-12-27  9:26 ` [RFC PATCH 6/9] powerpc: add CFUNC assembly label annotation Nicholas Piggin
@ 2022-12-27  9:26 ` Nicholas Piggin
  2022-12-27  9:26 ` [RFC PATCH 8/9] powerpc/64: vmlinux support building with PCREL addressing Nicholas Piggin
  2022-12-27  9:26 ` [RFC PATCH 9/9] powerpc/64: modules " Nicholas Piggin
  8 siblings, 0 replies; 10+ messages in thread
From: Nicholas Piggin @ 2022-12-27  9:26 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Piggin, Alan Modra

Add an option to build the kernel and modules with prefixed
instructions if the CPU and toolchain support it.

This is not related to kernel support for userspace execution of
prefixed instructions.

Building with prefixed instructions breaks some extended inline asm
memory addressing; for example, the compiler can supply immediates
that exceed the range of a simple load/store displacement. Whether
this is a toolchain or a kernel asm problem remains to be seen. For
now, these are replaced with simpler and less efficient direct
register addressing when compiling with prefixed instructions.
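
A hedged sketch of the failure mode and the workaround (illustrative
only; the real fallbacks are in the hunks below): with an "m<>"
operand under -mprefixed, the compiler may choose an address whose
displacement only fits the 34-bit prefixed encoding, which a
hand-written non-prefixed mnemonic such as "lwz%U1%X1" cannot express.
Forcing the address into a base register with the "b" constraint and
using a zero displacement avoids the problem:

	/* sketch only: mirrors the arch_atomic_read() fallback below */
	static inline int prefixed_safe_read(const int *p)
	{
		int t;

		asm volatile("lwz %0,0(%1)" : "=r" (t) : "b" (p));
		return t;
	}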

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/Kconfig                   |  3 +++
 arch/powerpc/Makefile                  |  4 +++
 arch/powerpc/include/asm/atomic.h      | 24 ++++++++++++++---
 arch/powerpc/include/asm/io.h          | 37 ++++++++++++++++++++++++++
 arch/powerpc/include/asm/uaccess.h     | 28 +++++++++++++++++--
 arch/powerpc/kernel/trace/ftrace.c     |  2 ++
 arch/powerpc/platforms/Kconfig.cputype | 20 ++++++++++++++
 7 files changed, 112 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b8c4ac56bddc..f8ee94785f8c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -4,6 +4,9 @@ source "arch/powerpc/platforms/Kconfig.cputype"
 config CC_HAS_ELFV2
 	def_bool PPC64 && $(cc-option, -mabi=elfv2)
 
+config CC_HAS_PREFIXED
+	def_bool PPC64 && $(cc-option, -mcpu=power10 -mprefixed)
+
 config 32BIT
 	bool
 	default y if PPC32
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index dc4cbf0a5ca9..5b6c276bb690 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -193,7 +193,11 @@ ifdef CONFIG_476FPE_ERR46
 endif
 
 # No prefix or pcrel
+ifdef CONFIG_PPC_KERNEL_PREFIXED
+KBUILD_CFLAGS += $(call cc-option,-mprefixed)
+else
 KBUILD_CFLAGS += $(call cc-option,-mno-prefixed)
+endif
 KBUILD_CFLAGS += $(call cc-option,-mno-pcrel)
 
 # No AltiVec or VSX or MMA instructions when building kernel
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 486ab7889121..50212c44be2a 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -27,14 +27,22 @@ static __inline__ int arch_atomic_read(const atomic_t *v)
 {
 	int t;
 
-	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+	/* -mprefixed can generate offsets beyond range, fall back hack */
+	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+		__asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
+	else
+		__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
 
 	return t;
 }
 
 static __inline__ void arch_atomic_set(atomic_t *v, int i)
 {
-	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+	/* -mprefixed can generate offsets beyond range, fall back hack */
+	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+		__asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
+	else
+		__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
 }
 
 #define ATOMIC_OP(op, asm_op, suffix, sign, ...)			\
@@ -226,14 +234,22 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
 {
 	s64 t;
 
-	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+	/* -mprefixed can generate offsets beyond range, fall back hack */
+	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+		__asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
+	else
+		__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
 
 	return t;
 }
 
 static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
 {
-	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+	/* -mprefixed can generate offsets beyond range, fall back hack */
+	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+		__asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
+	else
+		__asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
 }
 
 #define ATOMIC64_OP(op, asm_op)						\
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index fc112a91d0c2..f1e657c9bbe8 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -97,6 +97,42 @@ extern bool isa_io_special;
  *
  */
 
+/* -mprefixed can generate offsets beyond range, fall back hack */
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
+#define DEF_MMIO_IN_X(name, size, insn)				\
+static inline u##size name(const volatile u##size __iomem *addr)	\
+{									\
+	u##size ret;							\
+	__asm__ __volatile__("sync;"#insn" %0,0,%1;twi 0,%0,0;isync"	\
+		: "=r" (ret) : "r" (addr) : "memory");			\
+	return ret;							\
+}
+
+#define DEF_MMIO_OUT_X(name, size, insn)				\
+static inline void name(volatile u##size __iomem *addr, u##size val)	\
+{									\
+	__asm__ __volatile__("sync;"#insn" %1,0,%0"			\
+		: : "r" (addr), "r" (val) : "memory");			\
+	mmiowb_set_pending();						\
+}
+
+#define DEF_MMIO_IN_D(name, size, insn)				\
+static inline u##size name(const volatile u##size __iomem *addr)	\
+{									\
+	u##size ret;							\
+	__asm__ __volatile__("sync;"#insn" %0,0(%1);twi 0,%0,0;isync"\
+		: "=r" (ret) : "b" (addr) : "memory");	\
+	return ret;							\
+}
+
+#define DEF_MMIO_OUT_D(name, size, insn)				\
+static inline void name(volatile u##size __iomem *addr, u##size val)	\
+{									\
+	__asm__ __volatile__("sync;"#insn" %1,0(%0)"			\
+		: : "b" (addr), "r" (val) : "memory");	\
+	mmiowb_set_pending();						\
+}
+#else
 #define DEF_MMIO_IN_X(name, size, insn)				\
 static inline u##size name(const volatile u##size __iomem *addr)	\
 {									\
@@ -130,6 +166,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val)	\
 		: "=m<>" (*addr) : "r" (val) : "memory");	\
 	mmiowb_set_pending();						\
 }
+#endif
 
 DEF_MMIO_IN_D(in_8,     8, lbz);
 DEF_MMIO_OUT_D(out_8,   8, stb);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 3ddc65c63a49..722b1ec12cff 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -71,14 +71,26 @@ __pu_failed:							\
  * because we do not write to any memory gcc knows about, so there
  * are no aliasing issues.
  */
+/* -mprefixed can generate offsets beyond range, fall back hack */
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
+#define __put_user_asm_goto(x, addr, label, op)			\
+	asm_volatile_goto(					\
+		"1:	" op " %0,0(%1)	# put_user\n"		\
+		EX_TABLE(1b, %l2)				\
+		:						\
+		: "r" (x), "b" (addr)				\
+		:						\
+		: label)
+#else
 #define __put_user_asm_goto(x, addr, label, op)			\
 	asm_volatile_goto(					\
 		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
 		EX_TABLE(1b, %l2)				\
 		:						\
-		: "r" (x), "m<>" (*addr)		\
+		: "r" (x), "m<>" (*addr)			\
 		:						\
 		: label)
+#endif
 
 #ifdef __powerpc64__
 #define __put_user_asm2_goto(x, ptr, label)			\
@@ -131,14 +143,26 @@ do {								\
 
 #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
 
+/* -mprefixed can generate offsets beyond range, fall back hack */
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
+#define __get_user_asm_goto(x, addr, label, op)			\
+	asm_volatile_goto(					\
+		"1:	"op" %0,0(%1)	# get_user\n"		\
+		EX_TABLE(1b, %l2)				\
+		: "=r" (x)					\
+		: "b" (addr)					\
+		:						\
+		: label)
+#else
 #define __get_user_asm_goto(x, addr, label, op)			\
 	asm_volatile_goto(					\
 		"1:	"op"%U1%X1 %0, %1	# get_user\n"	\
 		EX_TABLE(1b, %l2)				\
 		: "=r" (x)					\
-		: "m<>" (*addr)				\
+		: "m<>" (*addr)					\
 		:						\
 		: label)
+#endif
 
 #ifdef __powerpc64__
 #define __get_user_asm2_goto(x, addr, label)			\
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 7b85c3b460a3..72864fb7a6cc 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -194,6 +194,8 @@ __ftrace_make_nop(struct module *mod,
 	 * get corrupted.
 	 *
 	 * Use a b +8 to jump over the load.
+	 * XXX: could make PCREL depend on MPROFILE_KERNEL
+	 * XXX: check PCREL && MPROFILE_KERNEL calling sequence
 	 */
 	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
 		pop = ppc_inst(PPC_RAW_NOP());
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 9563336e3348..0d9646101caf 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -180,6 +180,7 @@ config POWER10_CPU
 	bool "POWER10"
 	depends on PPC_BOOK3S_64
 	select ARCH_HAS_FAST_MULTIPLIER
+	select PPC_HAVE_PREFIXED_SUPPORT
 
 config E5500_CPU
 	bool "Freescale e5500"
@@ -444,6 +445,22 @@ config PPC_RADIX_MMU_DEFAULT
 
 	  If you're unsure, say Y.
 
+config PPC_KERNEL_PREFIXED
+	depends on PPC_HAVE_PREFIXED_SUPPORT
+	depends on CC_HAS_PREFIXED
+	default n
+	bool "Build Kernel with Prefixed Instructions"
+	help
+	  POWER10 and later CPUs support prefixed instructions: 8 byte
+	  instructions that include larger immediates, pc relative addressing,
+	  and various floating point, vector, and MMA instructions.
+
+	  This option builds the kernel with prefixed instructions, and
+	  allows a pc relative addressing option to be selected.
+
+	  Kernel support for prefixed instructions in applications and guests
+	  is not affected by this option.
+
 config PPC_KUEP
 	bool "Kernel Userspace Execution Prevention" if !40x
 	default y if !40x
@@ -480,6 +497,9 @@ config PPC_MMU_NOHASH
 config PPC_HAVE_PMU_SUPPORT
 	bool
 
+config PPC_HAVE_PREFIXED_SUPPORT
+	bool
+
 config PMU_SYSFS
 	bool "Create PMU SPRs sysfs file"
 	default n
-- 
2.37.2


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [RFC PATCH 8/9] powerpc/64: vmlinux support building with PCREL addressing
  2022-12-27  9:26 [RFC PATCH 0/9] powerpc/64: Build with PC-Relative addressing Nicholas Piggin
                   ` (6 preceding siblings ...)
  2022-12-27  9:26 ` [RFC PATCH 7/9] powerpc/64: Add support to build with prefixed instructions Nicholas Piggin
@ 2022-12-27  9:26 ` Nicholas Piggin
  2022-12-27  9:26 ` [RFC PATCH 9/9] powerpc/64: modules " Nicholas Piggin
  8 siblings, 0 replies; 10+ messages in thread
From: Nicholas Piggin @ 2022-12-27  9:26 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Piggin, Alan Modra

PC-Relative or PCREL addressing is an extension to the ELF ABI which
uses Power ISA v3.1 PC-relative instructions to calculate addresses,
rather than the traditional TOC scheme.
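
As the LOAD_REG_ADDR change below illustrates, the TOC scheme
materializes an address with an addis/addi pair relative to r2, while
pcrel uses a single pla reg,name@pcrel. The trade-off is reach: the
pla/paddi displacement is a signed 34-bit immediate, so the range
checks in this series all reduce to something like the following
minimal C sketch (not part of the patch; the constants match the
checks in module_64.c and ftrace.c):

  #include <stdbool.h>
  #include <stdint.h>

  /* si0 (18 bits) and si1 (16 bits) concatenate into a signed 34-bit
   * displacement, giving +/- 8GB of reach around the instruction. */
  static bool pcrel34_in_range(int64_t reladdr)
  {
          return reladdr >= -0x200000000LL && reladdr <= 0x1FFFFFFFFLL;
  }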

Add an option to build vmlinux using PCREL addressing. Modules continue
to use TOC addressing.

- TOC address helpers and r2 are poisoned with -1 when running vmlinux.
  r2 could be used for something useful once things are ironed out.

- Assembly must call C functions with @notoc annotation, or the linker
  complains about a missing nop after the call. This is done with the
  CFUNC macro introduced earlier.

- Boot: with the exception of prom_init, execution branches to the
  kernel virtual address early in boot, before any addresses are
  generated, which ensures 34-bit pcrel addressing does not miss the
  high PAGE_OFFSET bits. TOC relative addressing has a similar
  requirement. prom_init does not go to the virtual address and its
  addresses should not carry over to the post-prom kernel.

- Ftrace trampolines must be converted from TOC addressing to PCREL
  addressing, including module ftrace trampolines that currently use the
  kernel TOC to find ftrace target functions (see the sketch after this
  list).

- BPF function prologue and function calling generation must be
  converted from TOC to PCREL (XXX: not well tested yet).

- copypage_64.S has an interesting problem: prefixed instructions have
  alignment restrictions, so the linker can add padding, which makes the
  assembler treat the difference between two local labels as
  non-constant even if alignment is arranged so padding is not required.
  This may need toolchain help to solve nicely; for now, move the
  prefixed instruction out of the alternate patch section to work around it.
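
When a displacement is patched into one of these trampolines or stubs,
the upper 18 bits land in the prefix word and the low 16 bits in the
suffix word. Here is a condensed C sketch of what the IMM_H18()/IMM_L()
patching in the diffs below does (illustrative only, not the kernel
code):

  #include <stdint.h>

  /* Fold a pre-validated 34-bit displacement into a pla/paddi
   * prefix/suffix pair, as the ftrace and module stub code does
   * with IMM_H18() and IMM_L(). */
  static void patch_pla_pair(uint32_t insn[2], int64_t reladdr)
  {
          insn[0] |= ((uint64_t)reladdr >> 16) & 0x3ffff; /* prefix si0 */
          insn[1] |= (uint64_t)reladdr & 0xffff;          /* suffix si1 */
  }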

This reduces kernel text size by about 6%.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/Kconfig                   |  3 ++
 arch/powerpc/Makefile                  |  7 +++
 arch/powerpc/include/asm/paca.h        |  2 +
 arch/powerpc/include/asm/ppc-opcode.h  |  8 ++++
 arch/powerpc/include/asm/ppc_asm.h     | 19 ++++++++
 arch/powerpc/include/asm/sections.h    |  5 +++
 arch/powerpc/kernel/asm-offsets.c      |  2 +
 arch/powerpc/kernel/head_64.S          | 14 ++++++
 arch/powerpc/kernel/irq.c              |  8 ++++
 arch/powerpc/kernel/module_64.c        | 60 +++++++++++++++++++-------
 arch/powerpc/kernel/paca.c             |  2 +
 arch/powerpc/kernel/trace/ftrace.c     | 50 ++++++++++++++++-----
 arch/powerpc/kernel/vector.S           |  6 +++
 arch/powerpc/kernel/vmlinux.lds.S      |  6 +++
 arch/powerpc/lib/copypage_64.S         |  4 +-
 arch/powerpc/net/bpf_jit.h             | 10 +++--
 arch/powerpc/net/bpf_jit_comp64.c      | 35 +++++++++++----
 arch/powerpc/platforms/Kconfig.cputype | 18 ++++++++
 arch/powerpc/xmon/xmon.c               |  2 +
 19 files changed, 221 insertions(+), 40 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index f8ee94785f8c..8679f9ac1406 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -7,6 +7,9 @@ config CC_HAS_ELFV2
 config CC_HAS_PREFIXED
 	def_bool PPC64 && $(cc-option, -mcpu=power10 -mprefixed)
 
+config CC_HAS_PCREL
+	def_bool PPC64 && $(cc-option, -mcpu=power10 -mpcrel)
+
 config 32BIT
 	bool
 	default y if PPC32
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 5b6c276bb690..7bd83d124c1e 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -107,6 +107,9 @@ LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) += -z notext
 LDFLAGS_vmlinux	:= $(LDFLAGS_vmlinux-y)
 
 ifdef CONFIG_PPC64
+ifdef CONFIG_PPC_KERNEL_PCREL
+	KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-pcrel)
+endif
 ifeq ($(call cc-option-yn,-mcmodel=medium),y)
 	# -mcmodel=medium breaks modules because it uses 32bit offsets from
 	# the TOC pointer to create pointers where possible. Pointers into the
@@ -198,7 +201,11 @@ KBUILD_CFLAGS += $(call cc-option,-mprefixed)
 else
 KBUILD_CFLAGS += $(call cc-option,-mno-prefixed)
 endif
+ifdef CONFIG_PPC_KERNEL_PCREL
+KBUILD_CFLAGS += $(call cc-option,-mpcrel)
+else
 KBUILD_CFLAGS += $(call cc-option,-mno-pcrel)
+endif
 
 # No AltiVec or VSX or MMA instructions when building kernel
 KBUILD_CFLAGS += $(call cc-option,-mno-altivec)
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 09f1790d0ae1..366ce872e6f3 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -88,7 +88,9 @@ struct paca_struct {
 	u16 lock_token;			/* Constant 0x8000, used in locks */
 #endif
 
+#ifndef CONFIG_PPC_KERNEL_PREFIXED
 	u64 kernel_toc;			/* Kernel TOC address */
+#endif
 	u64 kernelbase;			/* Base address of kernel */
 	u64 kernel_msr;			/* MSR while running in kernel */
 	void *emergency_sp;		/* pointer to emergency stack */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 21e33e46f4b8..ca5a0da7df4e 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -120,11 +120,18 @@
  * 16-bit immediate helper macros: HA() is for use with sign-extending instrs
  * (e.g. LD, ADDI).  If the bottom 16 bits is "-ve", add another bit into the
  * top half to negate the effect (i.e. 0xffff + 1 = 0x(1)0000).
+ *
+ * XXX: should these mask out possible sign bits?
  */
 #define IMM_H(i)                ((uintptr_t)(i)>>16)
 #define IMM_HA(i)               (((uintptr_t)(i)>>16) +                       \
 					(((uintptr_t)(i) & 0x8000) >> 15))
 
+/*
+ * Helper for the upper 18-bit immediate (si0 field) of prefixed instructions.
+ */
+#define IMM_H18(i)              (((uintptr_t)(i)>>16) & 0x3ffff)
+
 
 /* opcode and xopcode for instructions */
 #define OP_PREFIX	1
@@ -306,6 +313,7 @@
 #define PPC_PREFIX_8LS			0x04000000
 
 /* Prefixed instructions */
+#define PPC_INST_PADDI			0x38000000
 #define PPC_INST_PLD			0xe4000000
 #define PPC_INST_PSTD			0xf4000000
 
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 9f64f9a6a897..9315f007d010 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -183,7 +183,11 @@
 /*
  * Used to name C functions called from asm
  */
+#if defined(CONFIG_PPC_KERNEL_PCREL) && !defined(MODULE)
+#define CFUNC(name) name@notoc
+#else
 #define CFUNC(name) name
+#endif
 
 /*
  * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit
@@ -212,6 +216,9 @@
 	.globl name; \
 name:
 
+#if defined(CONFIG_PPC_KERNEL_PCREL) && !defined(MODULE)
+#define _GLOBAL_TOC _GLOBAL
+#else
 #define _GLOBAL_TOC(name) \
 	.align 2 ; \
 	.type name,@function; \
@@ -220,6 +227,7 @@ name: \
 0:	addis r2,r12,(.TOC.-0b)@ha; \
 	addi r2,r2,(.TOC.-0b)@l; \
 	.localentry name,.-name
+#endif
 
 #define DOTSYM(a)	a
 
@@ -351,8 +359,13 @@ GLUE(.,name):
 
 #ifdef __powerpc64__
 
+#ifdef CONFIG_PPC_KERNEL_PCREL
+#define __LOAD_PACA_TOC(reg)			\
+	li	reg,-1
+#else
 #define __LOAD_PACA_TOC(reg)			\
 	ld	reg,PACATOC(r13)
+#endif
 
 #define LOAD_PACA_TOC()				\
 	__LOAD_PACA_TOC(r2)
@@ -366,9 +379,15 @@ GLUE(.,name):
 	ori	reg, reg, (expr)@l;		\
 	rldimi	reg, tmp, 32, 0
 
+#if defined(CONFIG_PPC_KERNEL_PCREL) && !defined(MODULE)
+#define LOAD_REG_ADDR(reg,name)			\
+	pla	reg,name@pcrel
+
+#else
 #define LOAD_REG_ADDR(reg,name)			\
 	addis	reg,r2,name@toc@ha;		\
 	addi	reg,reg,name@toc@l
+#endif
 
 #ifdef CONFIG_PPC_BOOK3E_64
 /*
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 9c00c9c0ca8f..4e1f548c8d37 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -46,10 +46,15 @@ extern char end_virt_trampolines[];
  */
 static inline unsigned long kernel_toc_addr(void)
 {
+#ifdef CONFIG_PPC_KERNEL_PCREL
+	BUILD_BUG();
+	return -1UL;
+#else
 	unsigned long toc_ptr;
 
 	asm volatile("mr %0, 2" : "=r" (toc_ptr));
 	return toc_ptr;
+#endif
 }
 
 static inline int overlaps_interrupt_vector_text(unsigned long start,
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d24a59a98c0c..9f14d95b8b32 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -185,7 +185,9 @@ int main(void)
 				 offsetof(struct task_struct, thread_info));
 	OFFSET(PACASAVEDMSR, paca_struct, saved_msr);
 	OFFSET(PACAR1, paca_struct, saved_r1);
+#ifndef CONFIG_PPC_KERNEL_PCREL
 	OFFSET(PACATOC, paca_struct, kernel_toc);
+#endif
 	OFFSET(PACAKBASE, paca_struct, kernelbase);
 	OFFSET(PACAKMSR, paca_struct, kernel_msr);
 #ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 70e8d653657c..dba3b053ddff 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -330,6 +330,12 @@ _GLOBAL(fsl_secondary_thread_init)
  */
 _GLOBAL(generic_secondary_smp_init)
 	FIXUP_ENDIAN
+
+	li	r13,0
+
+	/* Poison TOC */
+	li	r2,-1
+
 	mr	r24,r3
 	mr	r25,r4
 
@@ -527,6 +533,9 @@ __start_initialization_multiplatform:
 	/* Zero r13 (paca) so early program check / mce don't use it */
 	li	r13,0
 
+	/* Poison TOC */
+	li	r2,-1
+
 	/*
 	 * Are we booted from a PROM Of-type client-interface ?
 	 */
@@ -918,6 +927,10 @@ SYM_FUNC_END(enable_64b_mode)
  * this.
  */
 _GLOBAL(relative_toc)
+#ifdef CONFIG_PPC_KERNEL_PCREL
+	tdnei	r2,-1
+	blr
+#else
 	mflr	r0
 	bcl	20,31,$+4
 0:	mflr	r11
@@ -928,6 +941,7 @@ _GLOBAL(relative_toc)
 
 .balign 8
 p_toc:	.8byte	.TOC. - 0b
+#endif
 
 /*
  * This is where the main kernel code starts.
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c5b9ce887483..5d1cc78eaea2 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -206,7 +206,11 @@ static __always_inline void call_do_softirq(const void *sp)
 	asm volatile (
 		 PPC_STLU "	%%r1, %[offset](%[sp])	;"
 		"mr		%%r1, %[sp]		;"
+#ifdef CONFIG_PPC_KERNEL_PCREL
+		"bl		%[callee]@notoc		;"
+#else
 		"bl		%[callee]		;"
+#endif
 		 PPC_LL "	%%r1, 0(%%r1)		;"
 		 : // Outputs
 		 : // Inputs
@@ -259,7 +263,11 @@ static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
 		 PPC_STLU "	%%r1, %[offset](%[sp])	;"
 		"mr		%%r4, %%r1		;"
 		"mr		%%r1, %[sp]		;"
+#ifdef CONFIG_PPC_KERNEL_PCREL
+		"bl		%[callee]@notoc		;"
+#else
 		"bl		%[callee]		;"
+#endif
 		 PPC_LL "	%%r1, 0(%%r1)		;"
 		 : // Outputs
 		   "+r" (r3)
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ff045644f13f..de01ded00281 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -101,17 +101,18 @@ static unsigned long stub_func_addr(func_desc_t func)
 /* Like PPC32, we need little trampolines to do > 24-bit jumps (into
    the kernel itself).  But on PPC64, these need to be used for every
    jump, actually, to reset r2 (TOC+0x8000). */
-struct ppc64_stub_entry
-{
-	/* 28 byte jump instruction sequence (7 instructions). We only
-	 * need 6 instructions on ABIv2 but we always allocate 7 so
-	 * so we don't have to modify the trampoline load instruction. */
+struct ppc64_stub_entry {
+	/*
+	 * 28 byte jump instruction sequence (7 instructions) that can
+	 * hold ppc64_stub_insns or stub_insns. Must be 8-byte aligned
+	 * hold ppc64_stub_insns or stub_insns. Must be 8-byte aligned
+	 * for PCREL kernels, which use prefixed instructions in the stub.
 	u32 jump[7];
 	/* Used by ftrace to identify stubs */
 	u32 magic;
 	/* Data for the above code */
 	func_desc_t funcdata;
-};
+} __aligned(8);
 
 /*
  * PPC64 uses 24 bit jumps, but we need to jump into other modules or
@@ -333,11 +334,21 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
 #ifdef CONFIG_MPROFILE_KERNEL
 
 static u32 stub_insns[] = {
+#ifdef CONFIG_PPC_KERNEL_PCREL
+	PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)),
+	PPC_RAW_NOP(), /* align the prefix insn */
+	/* pli r12,addr */
+	PPC_PREFIX_MLS | __PPC_PRFX_R(0),
+	PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12),
+	PPC_RAW_MTCTR(_R12),
+	PPC_RAW_BCTR(),
+#else
 	PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
 	PPC_RAW_ADDIS(_R12, _R12, 0),
 	PPC_RAW_ADDI(_R12, _R12, 0),
 	PPC_RAW_MTCTR(_R12),
 	PPC_RAW_BCTR(),
+#endif
 };
 
 /*
@@ -358,18 +369,37 @@ static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
 {
 	long reladdr;
 
-	memcpy(entry->jump, stub_insns, sizeof(stub_insns));
-
-	/* Stub uses address relative to kernel toc (from the paca) */
-	reladdr = addr - kernel_toc_addr();
-	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
-		pr_err("%s: Address of %ps out of range of kernel_toc.\n",
-							me->name, (void *)addr);
+	if ((unsigned long)entry->jump % 8 != 0) {
+		pr_err("%s: Address of stub entry is not 8-byte aligned\n", me->name);
 		return 0;
 	}
 
-	entry->jump[1] |= PPC_HA(reladdr);
-	entry->jump[2] |= PPC_LO(reladdr);
+	BUILD_BUG_ON(sizeof(stub_insns) > sizeof(entry->jump));
+	memcpy(entry->jump, stub_insns, sizeof(stub_insns));
+
+	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
+		/* Stub uses address relative to kernel base (from the paca) */
+		reladdr = addr - local_paca->kernelbase;
+		if (reladdr > 0x1FFFFFFFFL || reladdr < -0x200000000L) {
+			pr_err("%s: Address of 0x%lx out of range of 34-bit relative address reladdr=0x%lx entry=0x%lx.\n",
+				me->name, addr, reladdr, (unsigned long)entry);
+			return 0;
+		}
+
+		entry->jump[2] |= IMM_H18(reladdr);
+		entry->jump[3] |= IMM_L(reladdr);
+	} else {
+		/* Stub uses address relative to kernel toc (from the paca) */
+		reladdr = addr - kernel_toc_addr();
+		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+			pr_err("%s: Address of %ps out of range of kernel_toc.\n",
+								me->name, (void *)addr);
+			return 0;
+		}
+
+		entry->jump[1] |= PPC_HA(reladdr);
+		entry->jump[2] |= PPC_LO(reladdr);
+	}
 
 	/* Even though we don't use funcdata in the stub, it's needed elsewhere. */
 	entry->funcdata = func_desc(addr);
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index be8db402e963..cda4e00b67c1 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -191,7 +191,9 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
 #endif
 	new_paca->lock_token = 0x8000;
 	new_paca->paca_index = cpu;
+#ifndef CONFIG_PPC_KERNEL_PCREL
 	new_paca->kernel_toc = kernel_toc_addr();
+#endif
 	new_paca->kernelbase = (unsigned long) _stext;
 	/* Only set MSR:IR/DR when MMU is initialized */
 	new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 72864fb7a6cc..78e7250bb6e8 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -727,6 +727,15 @@ int __init ftrace_dyn_arch_init(void)
 {
 	int i;
 	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
+#ifdef CONFIG_PPC_KERNEL_PCREL
+	u32 stub_insns[] = {
+		/* pla r12,addr */
+		PPC_PREFIX_MLS | __PPC_PRFX_R(1),
+		PPC_INST_PADDI | ___PPC_RT(_R12),
+		PPC_RAW_MTCTR(_R12),
+		PPC_RAW_BCTR()
+	};
+#else
 	u32 stub_insns[] = {
 		PPC_RAW_LD(_R12, _R13, PACATOC),
 		PPC_RAW_ADDIS(_R12, _R12, 0),
@@ -734,6 +743,8 @@ int __init ftrace_dyn_arch_init(void)
 		PPC_RAW_MTCTR(_R12),
 		PPC_RAW_BCTR()
 	};
+#endif
+
 	unsigned long addr;
 	long reladdr;
 
@@ -742,19 +753,36 @@ int __init ftrace_dyn_arch_init(void)
 	else
 		addr = ppc_global_function_entry((void *)ftrace_caller);
 
-	reladdr = addr - kernel_toc_addr();
+	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
+		for (i = 0; i < 2; i++) {
+			reladdr = addr - (unsigned long)tramp[i];
 
-	if (reladdr >= SZ_2G || reladdr < -(long)SZ_2G) {
-		pr_err("Address of %ps out of range of kernel_toc.\n",
-				(void *)addr);
-		return -1;
-	}
+			if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
+				pr_err("Address of %ps out of range of pcrel address.\n",
+						(void *)addr);
+				return -1;
+			}
+
+			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
+			tramp[i][0] |= IMM_H18(reladdr);
+			tramp[i][1] |= IMM_L(reladdr);
+			add_ftrace_tramp((unsigned long)tramp[i]);
+		}
+	} else {
+		reladdr = addr - kernel_toc_addr();
 
-	for (i = 0; i < 2; i++) {
-		memcpy(tramp[i], stub_insns, sizeof(stub_insns));
-		tramp[i][1] |= PPC_HA(reladdr);
-		tramp[i][2] |= PPC_LO(reladdr);
-		add_ftrace_tramp((unsigned long)tramp[i]);
+		if (reladdr >= (long)SZ_2G || reladdr < -(long)SZ_2G) {
+			pr_err("Address of %ps out of range of kernel_toc.\n",
+					(void *)addr);
+			return -1;
+		}
+
+		for (i = 0; i < 2; i++) {
+			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
+			tramp[i][1] |= PPC_HA(reladdr);
+			tramp[i][2] |= PPC_LO(reladdr);
+			add_ftrace_tramp((unsigned long)tramp[i]);
+		}
 	}
 
 	return 0;
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index ffe5d90abe17..fcc0ad6d9c7b 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -177,9 +177,15 @@ fpone:
 fphalf:
 	.quad	0x3fe0000000000000	/* 0.5 */
 
+#ifdef CONFIG_PPC_KERNEL_PCREL
+#define LDCONST(fr, name)		\
+	pla	r11,name@pcrel;		\
+	lfd	fr,0(r11)
+#else
 #define LDCONST(fr, name)		\
 	addis	r11,r2,name@toc@ha;	\
 	lfd	fr,name@toc@l(r11)
+#endif
 #endif
 	.text
 /*
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 8c3862b4c259..ed0dc6ac566c 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -168,12 +168,18 @@ SECTIONS
 	}
 
 #else /* CONFIG_PPC32 */
+#ifndef CONFIG_PPC_KERNEL_PCREL
 	.toc1 : AT(ADDR(.toc1) - LOAD_OFFSET) {
 		*(.toc1)
 	}
+#endif
 
 	.got : AT(ADDR(.got) - LOAD_OFFSET) ALIGN(256) {
+#ifdef CONFIG_PPC_KERNEL_PCREL
+		*(.got)
+#else
 		*(.got .toc)
+#endif
 	}
 
 	SOFT_MASK_TABLE(8)
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index 6812cb19d04a..f426a24a955d 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -18,8 +18,10 @@ FTR_SECTION_ELSE
 #endif
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
 	ori	r5,r5,PAGE_SIZE@l
-BEGIN_FTR_SECTION
+	/* Prefixed instructions cause the label difference to be non-constant
+	 * so this can't go in FTR sections. */
 	LOAD_REG_ADDR(r10, ppc64_caches)
+BEGIN_FTR_SECTION
 	lwz	r11,DCACHEL1LOGBLOCKSIZE(r10)	/* log2 of cache block size */
 	lwz     r12,DCACHEL1BLOCKSIZE(r10)	/* get cache block size */
 	li	r9,0
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index a4f7880f959d..31de45f02827 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -19,6 +19,8 @@
 #define FUNCTION_DESCR_SIZE	0
 #endif
 
+#define CTX_NIA(ctx) ((unsigned long)ctx->idx * 4)
+
 #define PLANT_INSTR(d, idx, instr)					      \
 	do { if (d) { (d)[idx] = instr; } idx++; } while (0)
 #define EMIT(instr)		PLANT_INSTR(image, ctx->idx, instr)
@@ -26,7 +28,7 @@
 /* Long jump; (unconditional 'branch') */
 #define PPC_JMP(dest)							      \
 	do {								      \
-		long offset = (long)(dest) - (ctx->idx * 4);		      \
+		long offset = (long)(dest) - CTX_NIA(ctx);		      \
 		if ((dest) != 0 && !is_offset_in_branch_range(offset)) {		      \
 			pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);			\
 			return -ERANGE;					      \
@@ -40,7 +42,7 @@
 /* "cond" here covers BO:BI fields. */
 #define PPC_BCC_SHORT(cond, dest)					      \
 	do {								      \
-		long offset = (long)(dest) - (ctx->idx * 4);		      \
+		long offset = (long)(dest) - CTX_NIA(ctx);		      \
 		if ((dest) != 0 && !is_offset_in_cond_branch_range(offset)) {		      \
 			pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);		\
 			return -ERANGE;					      \
@@ -92,12 +94,12 @@
  * state.
  */
 #define PPC_BCC(cond, dest)	do {					      \
-		if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) {	\
+		if (is_offset_in_cond_branch_range((long)(dest) - CTX_NIA(ctx))) {	\
 			PPC_BCC_SHORT(cond, dest);			      \
 			EMIT(PPC_RAW_NOP());				      \
 		} else {						      \
 			/* Flip the 'T or F' bit to invert comparison */      \
-			PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4);  \
+			PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, CTX_NIA(ctx) + 2*4);  \
 			PPC_JMP(dest);					      \
 		} } while(0)
 
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 29ee306d6302..5e5b97383d6e 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -126,8 +126,10 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 {
 	int i;
 
+#ifndef CONFIG_PPC_KERNEL_PCREL
 	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
 		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
+#endif
 
 	/*
 	 * Initialize tail_call_cnt if we do tail calls.
@@ -208,16 +210,31 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u
 	if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
 		return -EINVAL;
 
-	reladdr = func_addr - kernel_toc_addr();
-	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
-		pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
-		return -ERANGE;
-	}
+	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
+		reladdr = func_addr - CTX_NIA(ctx);
 
-	EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
-	EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
-	EMIT(PPC_RAW_MTCTR(_R12));
-	EMIT(PPC_RAW_BCTRL());
+		if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
+			pr_err("eBPF: address of %ps out of range of pcrel address.\n", (void *)func);
+			return -ERANGE;
+		}
+		/* pla r12,addr */
+		EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
+		EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
+		EMIT(PPC_RAW_MTCTR(_R12));
+		EMIT(PPC_RAW_BCTR());
+
+	} else {
+		reladdr = func_addr - kernel_toc_addr();
+		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+			pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
+			return -ERANGE;
+		}
+
+		EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
+		EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
+		EMIT(PPC_RAW_MTCTR(_R12));
+		EMIT(PPC_RAW_BCTRL());
+	}
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 0d9646101caf..ccd871d044ed 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -181,6 +181,7 @@ config POWER10_CPU
 	depends on PPC_BOOK3S_64
 	select ARCH_HAS_FAST_MULTIPLIER
 	select PPC_HAVE_PREFIXED_SUPPORT
+	select PPC_HAVE_PCREL_SUPPORT
 
 config E5500_CPU
 	bool "Freescale e5500"
@@ -461,6 +462,20 @@ config PPC_KERNEL_PREFIXED
 	  Kernel support for prefixed instructions in applications and guests
 	  is not affected by this option.
 
+config PPC_KERNEL_PCREL
+	depends on PPC_HAVE_PCREL_SUPPORT
+	depends on PPC_HAVE_PREFIXED_SUPPORT
+	depends on CC_HAS_PCREL
+	default n
+	select PPC_KERNEL_PREFIXED
+	bool "Build Kernel with PC-Relative addressing model"
+	help
+	  POWER10 and later CPUs support pc relative addressing. Recent
+	  toolchains support an ELF ABI extension that provides a pc
+	  relative addressing model.
+
+	  This option builds the kernel with the pc relative ABI model.
+
 config PPC_KUEP
 	bool "Kernel Userspace Execution Prevention" if !40x
 	default y if !40x
@@ -500,6 +515,9 @@ config PPC_HAVE_PMU_SUPPORT
 config PPC_HAVE_PREFIXED_SUPPORT
 	bool
 
+config PPC_HAVE_PCREL_SUPPORT
+	bool
+
 config PMU_SYSFS
 	bool "Create PMU SPRs sysfs file"
 	default n
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 0da66bc4823d..21284e890d64 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2636,7 +2636,9 @@ static void dump_one_paca(int cpu)
 
 	DUMP(p, lock_token, "%#-*x");
 	DUMP(p, paca_index, "%#-*x");
+#ifndef CONFIG_PPC_KERNEL_PCREL
 	DUMP(p, kernel_toc, "%#-*llx");
+#endif
 	DUMP(p, kernelbase, "%#-*llx");
 	DUMP(p, kernel_msr, "%#-*llx");
 	DUMP(p, emergency_sp, "%-*px");
-- 
2.37.2


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [RFC PATCH 9/9] powerpc/64: modules support building with PCREL addressing
  2022-12-27  9:26 [RFC PATCH 0/9] powerpc/64: Build with PC-Relative addressing Nicholas Piggin
                   ` (7 preceding siblings ...)
  2022-12-27  9:26 ` [RFC PATCH 8/9] powerpc/64: vmlinux support building with PCREL addressing Nicholas Piggin
@ 2022-12-27  9:26 ` Nicholas Piggin
  8 siblings, 0 replies; 10+ messages in thread
From: Nicholas Piggin @ 2022-12-27  9:26 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Piggin, Alan Modra

Build modules using PCREL addressing when the kernel PCREL build option
is selected.

The module loader must handle several new relocation types:
- R_PPC64_REL24_NOTOC is a function call handled like R_PPC_REL24, but
  it does not restore r2 upon return. The external function call stub is
  changed to use PCREL addressing to load the function pointer rather
  than loading it relative to the module TOC.
- R_PPC64_GOT_PCREL34 is a reference to external data. A GOT table must
  be built by hand, because the linker normally adds it during the final
  link (which is not done for kernel modules). The GOT table is built
  similarly to the external function call stub table (see the sketch
  after this list). This section is called .mygot because .got has a
  special meaning to the linker, which can upset it.
- R_PPC64_PCREL34 is used for local data addressing. There is a special
  case where the percpu section is moved at load-time to the percpu
  area, which is out of range of this relocation; that case is converted
  to a GOT access (XXX: is this kosher? Must restrict this case to only
  percpu so it doesn't paper over any bugs).
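
Condensing got_for_addr() from the diff below: the hand-built GOT is
an array of 8-byte slots terminated by a zero address, and a slot is
reused when the same address is requested again. A C sketch with the
error reporting trimmed:

  #include <stddef.h>
  #include <stdint.h>

  struct got_entry { uint64_t addr; };

  /* 'num' counts the allocated slots, which include one spare zero
   * entry acting as the terminator. */
  static uint64_t *got_slot(struct got_entry *got, unsigned int num,
                            uint64_t addr)
  {
          unsigned int i;

          for (i = 0; got[i].addr; i++) {
                  if (i >= num)
                          return NULL;            /* table exhausted */
                  if (got[i].addr == addr)
                          return &got[i].addr;    /* existing entry */
          }
          got[i].addr = addr;                     /* claim a fresh slot */
          return &got[i].addr;
  }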

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/Makefile               |   5 +-
 arch/powerpc/include/asm/module.h   |   9 +-
 arch/powerpc/include/asm/ppc_asm.h  |   6 +-
 arch/powerpc/include/uapi/asm/elf.h |   4 +
 arch/powerpc/kernel/module_64.c     | 294 ++++++++++++++++++++++++----
 5 files changed, 269 insertions(+), 49 deletions(-)

diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 7bd83d124c1e..833247e1a81a 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -107,9 +107,7 @@ LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) += -z notext
 LDFLAGS_vmlinux	:= $(LDFLAGS_vmlinux-y)
 
 ifdef CONFIG_PPC64
-ifdef CONFIG_PPC_KERNEL_PCREL
-	KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-pcrel)
-endif
+ifndef CONFIG_PPC_KERNEL_PCREL
 ifeq ($(call cc-option-yn,-mcmodel=medium),y)
 	# -mcmodel=medium breaks modules because it uses 32bit offsets from
 	# the TOC pointer to create pointers where possible. Pointers into the
@@ -124,6 +122,7 @@ else
 	export NO_MINIMAL_TOC := -mno-minimal-toc
 endif
 endif
+endif
 
 CFLAGS-$(CONFIG_PPC64)	:= $(call cc-option,-mtraceback=no)
 ifndef CONFIG_CC_IS_CLANG
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index 09e2ffd360bb..f3d12a553863 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -27,8 +27,12 @@ struct ppc_plt_entry {
 struct mod_arch_specific {
 #ifdef __powerpc64__
 	unsigned int stubs_section;	/* Index of stubs section in module */
+#ifdef CONFIG_PPC_KERNEL_PCREL
+	unsigned int got_section;	/* What section is the GOT? */
+#else
 	unsigned int toc_section;	/* What section is the TOC? */
 	bool toc_fixed;			/* Have we fixed up .TOC.? */
+#endif
 
 	/* For module function descriptor dereference */
 	unsigned long start_opd;
@@ -52,12 +56,15 @@ struct mod_arch_specific {
 
 /*
  * Select ELF headers.
- * Make empty section for module_frob_arch_sections to expand.
+ * Make empty sections for module_frob_arch_sections to expand.
  */
 
 #ifdef __powerpc64__
 #    ifdef MODULE
 	asm(".section .stubs,\"ax\",@nobits; .align 3; .previous");
+#        ifdef CONFIG_PPC_KERNEL_PCREL
+	    asm(".section .mygot,\"a\",@nobits; .align 3; .previous");
+#        endif
 #    endif
 #else
 #    ifdef MODULE
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 9315f007d010..1a00523559e7 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -183,7 +183,7 @@
 /*
  * Used to name C functions called from asm
  */
-#if defined(CONFIG_PPC_KERNEL_PCREL) && !defined(MODULE)
+#ifdef CONFIG_PPC_KERNEL_PCREL
 #define CFUNC(name) name@notoc
 #else
 #define CFUNC(name) name
@@ -216,7 +216,7 @@
 	.globl name; \
 name:
 
-#if defined(CONFIG_PPC_KERNEL_PCREL) && !defined(MODULE)
+#ifdef CONFIG_PPC_KERNEL_PCREL
 #define _GLOBAL_TOC _GLOBAL
 #else
 #define _GLOBAL_TOC(name) \
@@ -379,7 +379,7 @@ GLUE(.,name):
 	ori	reg, reg, (expr)@l;		\
 	rldimi	reg, tmp, 32, 0
 
-#if defined(CONFIG_PPC_KERNEL_PCREL) && !defined(MODULE)
+#ifdef CONFIG_PPC_KERNEL_PCREL
 #define LOAD_REG_ADDR(reg,name)			\
 	pla	reg,name@pcrel
 
diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h
index 308857123a08..dbc4a5b8d02d 100644
--- a/arch/powerpc/include/uapi/asm/elf.h
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -279,8 +279,12 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
 #define R_PPC64_TLSLD		108
 #define R_PPC64_TOCSAVE		109
 
+#define R_PPC64_REL24_NOTOC	116
 #define R_PPC64_ENTRY		118
 
+#define R_PPC64_PCREL34		132
+#define R_PPC64_GOT_PCREL34	133
+
 #define R_PPC64_REL16		249
 #define R_PPC64_REL16_LO	250
 #define R_PPC64_REL16_HI	251
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index de01ded00281..6f443916ad7b 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -114,6 +114,10 @@ struct ppc64_stub_entry {
 	func_desc_t funcdata;
 } __aligned(8);
 
+struct ppc64_got_entry {
+	u64 addr;
+};
+
 /*
  * PPC64 uses 24 bit jumps, but we need to jump into other modules or
  * the kernel which may be further.  So we jump to a stub.
@@ -128,6 +132,14 @@ struct ppc64_stub_entry {
  * to the TOC ptr, r2) into the stub.
  */
 static u32 ppc64_stub_insns[] = {
+#ifdef CONFIG_PPC_KERNEL_PCREL
+	/* pla r11,addr ; ld r12,32(r11) (XXX: could use PLD) */
+	PPC_PREFIX_MLS | __PPC_PRFX_R(1),
+	PPC_INST_PADDI | ___PPC_RT(_R11),
+	PPC_RAW_LD(_R12, _R11, 32),
+	PPC_RAW_MTCTR(_R12),
+	PPC_RAW_BCTR(),
+#else
 	PPC_RAW_ADDIS(_R11, _R2, 0),
 	PPC_RAW_ADDI(_R11, _R11, 0),
 	/* Save current r2 value in magic place on the stack. */
@@ -139,11 +151,13 @@ static u32 ppc64_stub_insns[] = {
 #endif
 	PPC_RAW_MTCTR(_R12),
 	PPC_RAW_BCTR(),
+#endif
 };
 
 /* Count how many different 24-bit relocations (different symbol,
    different addend) */
-static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
+static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num,
+				 unsigned long r_type)
 {
 	unsigned int i, r_info, r_addend, _count_relocs;
 
@@ -153,7 +167,7 @@ static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
 	r_addend = 0;
 	for (i = 0; i < num; i++)
 		/* Only count 24-bit relocs, others don't need stubs */
-		if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
+		if (ELF64_R_TYPE(rela[i].r_info) == r_type &&
 		    (r_info != ELF64_R_SYM(rela[i].r_info) ||
 		     r_addend != rela[i].r_addend)) {
 			_count_relocs++;
@@ -214,7 +228,14 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
 
 			relocs += count_relocs((void *)sechdrs[i].sh_addr,
 					       sechdrs[i].sh_size
-					       / sizeof(Elf64_Rela));
+					       / sizeof(Elf64_Rela),
+					       R_PPC_REL24);
+#ifdef CONFIG_PPC_KERNEL_PCREL
+			relocs += count_relocs((void *)sechdrs[i].sh_addr,
+					       sechdrs[i].sh_size
+					       / sizeof(Elf64_Rela),
+					       R_PPC64_REL24_NOTOC);
+#endif
 		}
 	}
 
@@ -231,6 +252,47 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
 	return relocs * sizeof(struct ppc64_stub_entry);
 }
 
+/* Get size of potential GOT required. */
+static unsigned long get_got_size(const Elf64_Ehdr *hdr,
+				  const Elf64_Shdr *sechdrs)
+{
+	/* One extra reloc so it's always 0-addr terminated */
+	unsigned long relocs = 1;
+	unsigned i;
+
+	/* Every relocated section... */
+	for (i = 1; i < hdr->e_shnum; i++) {
+		if (sechdrs[i].sh_type == SHT_RELA) {
+			pr_debug("Found relocations in section %u\n", i);
+			pr_debug("Ptr: %p.  Number: %Lu\n",
+			       (void *)sechdrs[i].sh_addr,
+			       sechdrs[i].sh_size / sizeof(Elf64_Rela));
+
+			/* Sort the relocation information based on a symbol and
+			 * addend key. This is a stable O(n*log n) complexity
+			 * algorithm but it will reduce the complexity of
+			 * count_relocs() to linear complexity O(n)
+			 */
+			sort((void *)sechdrs[i].sh_addr,
+			     sechdrs[i].sh_size / sizeof(Elf64_Rela),
+			     sizeof(Elf64_Rela), relacmp, NULL);
+
+			relocs += count_relocs((void *)sechdrs[i].sh_addr,
+					       sechdrs[i].sh_size
+					       / sizeof(Elf64_Rela),
+					       R_PPC64_GOT_PCREL34);
+		}
+	}
+
+#ifdef CONFIG_SMP
+	relocs++; /* .data..percpu relocs XXX: count this properly? */
+#endif
+
+	pr_debug("Looks like a total of %lu GOT entries, max\n", relocs);
+	return relocs * sizeof(struct ppc64_got_entry);
+}
+
+
 /* Still needed for ELFv2, for .TOC. */
 static void dedotify_versions(struct modversion_info *vers,
 			      unsigned long size)
@@ -263,6 +325,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
 	}
 }
 
+#ifndef CONFIG_PPC_KERNEL_PCREL
 static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
 			       const char *strtab,
 			       unsigned int symindex)
@@ -280,6 +343,7 @@ static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
 	}
 	return NULL;
 }
+#endif
 
 bool module_init_section(const char *name)
 {
@@ -298,11 +362,19 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
 	for (i = 1; i < hdr->e_shnum; i++) {
 		if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
 			me->arch.stubs_section = i;
+#ifdef CONFIG_PPC_KERNEL_PCREL
+		else if (strcmp(secstrings + sechdrs[i].sh_name, ".mygot") == 0) {
+			me->arch.got_section = i;
+			if (sechdrs[i].sh_addralign < 8)
+				sechdrs[i].sh_addralign = 8;
+		}
+#else
 		else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0) {
 			me->arch.toc_section = i;
 			if (sechdrs[i].sh_addralign < 8)
 				sechdrs[i].sh_addralign = 8;
 		}
+#endif
 		else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
 			dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
 					  sechdrs[i].sh_size);
@@ -319,15 +391,26 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
 		return -ENOEXEC;
 	}
 
+#ifdef CONFIG_PPC_KERNEL_PCREL
+	if (!me->arch.got_section) {
+		pr_err("%s: doesn't contain .mygot.\n", me->name);
+		return -ENOEXEC;
+	}
+
+	/* Override the got size */
+	sechdrs[me->arch.got_section].sh_size = get_got_size(hdr, sechdrs);
+#else
 	/* If we don't have a .toc, just use .stubs.  We need to set r2
 	   to some reasonable value in case the module calls out to
 	   other functions via a stub, or if a function pointer escapes
 	   the module by some means.  */
 	if (!me->arch.toc_section)
 		me->arch.toc_section = me->arch.stubs_section;
+#endif
 
 	/* Override the stubs size */
 	sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
+
 	return 0;
 }
 
@@ -381,8 +464,8 @@ static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
 		/* Stub uses address relative to kernel base (from the paca) */
 		reladdr = addr - local_paca->kernelbase;
 		if (reladdr > 0x1FFFFFFFFL || reladdr < -0x200000000L) {
-			pr_err("%s: Address of 0x%lx out of range of 34-bit relative address reladdr=0x%lx entry=0x%lx.\n",
-				me->name, addr, reladdr, (unsigned long)entry);
+			pr_err("%s: Address of %p out of range of 34-bit relative address.\n",
+				me->name, (void *)addr);
 			return 0;
 		}
 
@@ -392,8 +475,8 @@ static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
 		/* Stub uses address relative to kernel toc (from the paca) */
 		reladdr = addr - kernel_toc_addr();
 		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
-			pr_err("%s: Address of %ps out of range of kernel_toc.\n",
-								me->name, (void *)addr);
+			pr_err("%s: Address of %p out of range of kernel_toc.\n",
+				me->name, (void *)addr);
 			return 0;
 		}
 
@@ -437,6 +520,39 @@ static bool is_mprofile_ftrace_call(const char *name)
 }
 #endif
 
+/* We expect a noop next: if it is, replace it with instruction to
+   restore r2. */
+static int restore_r2(const char *name, u32 *instruction, struct module *me)
+{
+	u32 *prev_insn = instruction - 1;
+
+	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
+		return 1;
+
+	if (is_mprofile_ftrace_call(name))
+		return 1;
+
+	/*
+	 * Make sure the branch isn't a sibling call.  Sibling calls aren't
+	 * "link" branches and they don't return, so they don't need the r2
+	 * restore afterwards.
+	 */
+	if (!instr_is_relative_link_branch(ppc_inst(*prev_insn)))
+		return 1;
+
+	if (*instruction != PPC_RAW_NOP()) {
+		pr_err("%s: Expected nop after call, got %08x at %pS\n",
+			me->name, *instruction, instruction);
+		return 0;
+	}
+
+	/* ld r2,R2_STACK_OFFSET(r1) */
+	if (patch_instruction(instruction, ppc_inst(PPC_INST_LD_TOC)))
+		return 0;
+
+	return 1;
+}
+
 /*
  * r2 is the TOC pointer: it actually points 0x8000 into the TOC (this gives the
  * value maximum span in an instruction which uses a signed offset). Round down
@@ -445,7 +561,11 @@ static bool is_mprofile_ftrace_call(const char *name)
  */
 static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me)
 {
+#ifndef CONFIG_PPC_KERNEL_PCREL
 	return (sechdrs[me->arch.toc_section].sh_addr & ~0xfful) + 0x8000;
+#else
+	return -1;
+#endif
 }
 
 /* Patch stub to reference function and correct r2 value. */
@@ -462,28 +582,52 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
 	if (is_mprofile_ftrace_call(name))
 		return create_ftrace_stub(entry, addr, me);
 
+	if ((unsigned long)entry->jump % 8 != 0) {
+		pr_err("%s: Address of stub entry is not 8-byte aligned\n", me->name);
+		return 0;
+	}
+
+	BUILD_BUG_ON(sizeof(ppc64_stub_insns) > sizeof(entry->jump));
 	for (i = 0; i < ARRAY_SIZE(ppc64_stub_insns); i++) {
 		if (patch_instruction(&entry->jump[i],
 				      ppc_inst(ppc64_stub_insns[i])))
 			return 0;
 	}
 
-	/* Stub uses address relative to r2. */
-	reladdr = (unsigned long)entry - my_r2(sechdrs, me);
-	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
-		pr_err("%s: Address %p of stub out of range of %p.\n",
-		       me->name, (void *)reladdr, (void *)my_r2);
-		return 0;
-	}
-	pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
+	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
+		/* Stub uses address relative to itself! */
+		reladdr = 0;
+		if (reladdr > 0x1FFFFFFFFL || reladdr < -0x200000000L) {
+			pr_err("%s: Address of %p out of range of 34-bit relative address.\n",
+				me->name, (void *)reladdr);
+			return 0;
+		}
+		pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
 
-	if (patch_instruction(&entry->jump[0],
-			      ppc_inst(entry->jump[0] | PPC_HA(reladdr))))
-		return 0;
+		/* May not even need this if we're relative to 0 */
+		if (patch_instruction(&entry->jump[0],
+		    ppc_inst_prefix(entry->jump[0] | IMM_H18(reladdr),
+				    entry->jump[1] | IMM_L(reladdr))))
+			return 0;
 
-	if (patch_instruction(&entry->jump[1],
-			  ppc_inst(entry->jump[1] | PPC_LO(reladdr))))
-		return 0;
+	} else {
+		/* Stub uses address relative to r2. */
+		reladdr = (unsigned long)entry - my_r2(sechdrs, me);
+		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+			pr_err("%s: Address %p of stub out of range of %p.\n",
+			       me->name, (void *)reladdr, (void *)my_r2);
+			return 0;
+		}
+		pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
+
+		if (patch_instruction(&entry->jump[0],
+				      ppc_inst(entry->jump[0] | PPC_HA(reladdr))))
+			return 0;
+
+		if (patch_instruction(&entry->jump[1],
+				  ppc_inst(entry->jump[1] | PPC_LO(reladdr))))
+			return 0;
+	}
 
 	// func_desc_t is 8 bytes if ABIv2, else 16 bytes
 	desc = func_desc(addr);
@@ -527,34 +671,33 @@ static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs,
 	return (unsigned long)&stubs[i];
 }
 
-/* We expect a noop next: if it is, replace it with instruction to
-   restore r2. */
-static int restore_r2(const char *name, u32 *instruction, struct module *me)
+/* Find or create a GOT entry holding this address */
+static unsigned long got_for_addr(const Elf64_Shdr *sechdrs,
+				  unsigned long addr,
+				  struct module *me,
+				  const char *name)
 {
-	u32 *prev_insn = instruction - 1;
+	struct ppc64_got_entry *got;
+	unsigned int i, num_got;
 
-	if (is_mprofile_ftrace_call(name))
-		return 1;
+	if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
+		return addr;
 
-	/*
-	 * Make sure the branch isn't a sibling call.  Sibling calls aren't
-	 * "link" branches and they don't return, so they don't need the r2
-	 * restore afterwards.
-	 */
-	if (!instr_is_relative_link_branch(ppc_inst(*prev_insn)))
-		return 1;
+	num_got = sechdrs[me->arch.got_section].sh_size / sizeof(*got);
 
-	if (*instruction != PPC_RAW_NOP()) {
-		pr_err("%s: Expected nop after call, got %08x at %pS\n",
-			me->name, *instruction, instruction);
-		return 0;
+	/* Find this entry, or if that fails, the next avail. entry */
+	got = (void *)sechdrs[me->arch.got_section].sh_addr;
+	for (i = 0; got[i].addr; i++) {
+		if (WARN_ON(i >= num_got))
+			return 0;
+
+		if (got[i].addr == addr)
+			return (unsigned long)&got[i];
 	}
 
-	/* ld r2,R2_STACK_OFFSET(r1) */
-	if (patch_instruction(instruction, ppc_inst(PPC_INST_LD_TOC)))
-		return 0;
+	got[i].addr = addr;
 
-	return 1;
+	return (unsigned long)&got[i];
 }
 
 int apply_relocate_add(Elf64_Shdr *sechdrs,
@@ -572,6 +715,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 	pr_debug("Applying ADD relocate section %u to %u\n", relsec,
 	       sechdrs[relsec].sh_info);
 
+#ifndef CONFIG_PPC_KERNEL_PCREL
 	/* First time we're called, we can fix up .TOC. */
 	if (!me->arch.toc_fixed) {
 		sym = find_dot_toc(sechdrs, strtab, symindex);
@@ -581,6 +725,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 			sym->st_value = my_r2(sechdrs, me);
 		me->arch.toc_fixed = true;
 	}
+#endif
 
 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
 		/* This is where to make the change */
@@ -609,6 +754,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 			*(unsigned long *)location = value;
 			break;
 
+#ifndef CONFIG_PPC_KERNEL_PCREL
 		case R_PPC64_TOC:
 			*(unsigned long *)location = my_r2(sechdrs, me);
 			break;
@@ -668,8 +814,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 				= (*((uint16_t *) location) & ~0xffff)
 				| (value & 0xffff);
 			break;
+#endif
 
 		case R_PPC_REL24:
+#ifdef CONFIG_PPC_KERNEL_PCREL
+		/* PCREL still generates REL24 for mcount */
+		case R_PPC64_REL24_NOTOC:
+#endif
 			/* FIXME: Handle weak symbols here --RR */
 			if (sym->st_shndx == SHN_UNDEF ||
 			    sym->st_shndx == SHN_LIVEPATCH) {
@@ -717,6 +868,45 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 			*(u32 *)location = value;
 			break;
 
+#ifdef CONFIG_PPC_KERNEL_PCREL
+		case R_PPC64_PCREL34:
+			if (value - (unsigned long)location + 0x200000000 > 0x3ffffffff) {
+				/* XXX: this hits for per-cpu data relocs, should enforce no other relocs go through here */
+				value = got_for_addr(sechdrs, value, me,
+						strtab + sym->st_name);
+				if (!value)
+					return -ENOENT;
+				value -= (unsigned long)location;
+				/* Turn pla into pld */
+				((uint32_t *)location)[0]
+					= (((uint32_t *)location)[0] & ~0x02000000);
+				((uint32_t *)location)[1]
+					= (((uint32_t *)location)[1] & ~0xf8000000) | 0xe4000000;
+
+				((uint32_t *)location)[0]
+					= (((uint32_t *)location)[0] & ~0x3ffff)
+					| ((value >> 16) & 0x3ffff);
+				((uint32_t *)location)[1]
+					= (((uint32_t *)location)[1] & ~0xffff)
+					| (value & 0xffff);
+				break;
+			}
+			/* Convert value to relative */
+			value -= (unsigned long)location;
+			if (value + 0x200000000 > 0x3ffffffff) {
+				pr_err("%s: PCREL34 %li out of range!\n",
+				       me->name, (long int)value);
+				return -ENOEXEC;
+			}
+
+			if (patch_instruction((u32 *)location,
+			    ppc_inst_prefix((*(u32 *)location & ~0x3ffff) | IMM_H18(value),
+					    (*((u32 *)location + 1) & ~0xffff) | IMM_L(value))))
+				return -EFAULT;
+
+			break;
+
+#else
 		case R_PPC64_TOCSAVE:
 			/*
 			 * Marker reloc indicates we don't have to save r2.
@@ -724,8 +914,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 			 * it.
 			 */
 			break;
+#endif
 
 		case R_PPC64_ENTRY:
+			if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
+				break;
+
 			/*
 			 * Optimize ELFv2 large code model entry point if
 			 * the TOC is within 2GB range of current location.
@@ -768,6 +962,22 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 				| (value & 0xffff);
 			break;
 
+#ifdef CONFIG_PPC_KERNEL_PCREL
+		case R_PPC64_GOT_PCREL34:
+			value = got_for_addr(sechdrs, value, me,
+					strtab + sym->st_name);
+			if (!value)
+				return -ENOENT;
+			value -= (unsigned long)location;
+			((uint32_t *)location)[0]
+				= (((uint32_t *)location)[0] & ~0x3ffff)
+				| ((value >> 16) & 0x3ffff);
+			((uint32_t *)location)[1]
+				= (((uint32_t *)location)[1] & ~0xffff)
+				| (value & 0xffff);
+			break;
+#endif
+
 		default:
 			pr_err("%s: Unknown ADD relocation: %lu\n",
 			       me->name,
-- 
2.37.2


^ permalink raw reply related	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2022-12-27  9:35 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-12-27  9:26 [RFC PATCH 0/9] powerpc/64: Build with PC-Relative addressing Nicholas Piggin
2022-12-27  9:26 ` [RFC PATCH 1/9] crypto: powerpc - Use address generation helper for asm Nicholas Piggin
2022-12-27  9:26 ` [RFC PATCH 2/9] powerpc/64s: Refactor initialisation after prom Nicholas Piggin
2022-12-27  9:26 ` [RFC PATCH 3/9] powerpc/64e: Simplify address calculation in secondary hold loop Nicholas Piggin
2022-12-27  9:26 ` [RFC PATCH 4/9] powerpc/64: Move initial base and TOC pointer calculation Nicholas Piggin
2022-12-27  9:26 ` [RFC PATCH 5/9] powerpc/64s: Run at the kernel virtual address earlier in boot Nicholas Piggin
2022-12-27  9:26 ` [RFC PATCH 6/9] powerpc: add CFUNC assembly label annotation Nicholas Piggin
2022-12-27  9:26 ` [RFC PATCH 7/9] powerpc/64: Add support to build with prefixed instructions Nicholas Piggin
2022-12-27  9:26 ` [RFC PATCH 8/9] powerpc/64: vmlinux support building with PCREL addressing Nicholas Piggin
2022-12-27  9:26 ` [RFC PATCH 9/9] powerpc/64: modules " Nicholas Piggin

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).