* [PATCH v3 0/6] powerpc: merge _switch in 32/64
@ 2023-06-06 13:24 Nicholas Piggin
From: Nicholas Piggin @ 2023-06-06 13:24 UTC
  To: linuxppc-dev; +Cc: Nicholas Piggin

Since v2:
- Add PPC_CREATE_STACK_FRAME() to abstract prologue differences.
- Build fix.
- Makefile tidy [Christophe]
- Fix a missing SOB.

Since v1:
- Don't re-order 32-bit prologue.
- Improve Kconfig conditional includes.
- Break out code changes into their own patches before merging,
  so merge patch leaves generated code unchanged.
- Change prom_entry.S to prom_entry_64.S.


Nicholas Piggin (6):
  powerpc/64s: move stack SLB pinning out of line from _switch
  powerpc/64: Rearrange 64-bit _switch to prepare for 32/64 merge
  powerpc/32: Remove sync from _switch
  powerpc/32: Rearrange _switch to prepare for 32/64 merge
  powerpc: merge 32-bit and 64-bit _switch implementation
  powerpc/64: Rename entry_64.S to prom_entry_64.S

 arch/powerpc/include/asm/ppc_asm.h           |  14 +
 arch/powerpc/kernel/Makefile                 |  14 +-
 arch/powerpc/kernel/entry_32.S               |  58 ----
 arch/powerpc/kernel/prom_entry_64.S          |  87 +++++
 arch/powerpc/kernel/{entry_64.S => switch.S} | 331 ++++++++-----------
 scripts/head-object-list.txt                 |   2 +-
 6 files changed, 238 insertions(+), 268 deletions(-)
 create mode 100644 arch/powerpc/kernel/prom_entry_64.S
 rename arch/powerpc/kernel/{entry_64.S => switch.S} (59%)

-- 
2.40.1


* [PATCH v3 1/6] powerpc/64s: move stack SLB pinning out of line from _switch
From: Nicholas Piggin @ 2023-06-06 13:24 UTC
  To: linuxppc-dev; +Cc: Nicholas Piggin

The large hunk of SLB pinning asm code in _switch makes it more
difficult to see everything else that's going on. It is a less important
path now (radix skips it entirely), so the icache and fetch footprint
overhead of keeping it inline can be avoided.

Move context switch stack SLB pinning out of line.
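
The helper keeps the _switch register conventions: the new stack pointer
is passed in r8, the old one is in r1, and r3 must not be clobbered. The
call site then reduces to a feature-gated branch, as the hunk below
shows:

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	pin_stack_slb
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
#endif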

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kernel/entry_64.S | 113 ++++++++++++++++++---------------
 1 file changed, 62 insertions(+), 51 deletions(-)

diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 1bf1121e17f1..2e02834c5824 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -105,6 +105,64 @@ flush_branch_caches:
 	.endr
 
 	blr
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+.balign 32
+/*
+ * New stack pointer in r8, old stack pointer in r1, must not clobber r3
+ */
+pin_stack_slb:
+BEGIN_FTR_SECTION
+	clrrdi	r6,r8,28	/* get its ESID */
+	clrrdi	r9,r1,28	/* get current sp ESID */
+FTR_SECTION_ELSE
+	clrrdi	r6,r8,40	/* get its 1T ESID */
+	clrrdi	r9,r1,40	/* get current sp 1T ESID */
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
+	clrldi.	r0,r6,2		/* is new ESID c00000000? */
+	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
+	cror	eq,4*cr1+eq,eq
+	beq	2f		/* if yes, don't slbie it */
+
+	/* Bolt in the new stack SLB entry */
+	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
+	oris	r0,r6,(SLB_ESID_V)@h
+	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
+BEGIN_FTR_SECTION
+	li	r9,MMU_SEGSIZE_1T	/* insert B field */
+	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
+	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
+
+	/* Update the last bolted SLB.  No write barriers are needed
+	 * here, provided we only update the current CPU's SLB shadow
+	 * buffer.
+	 */
+	ld	r9,PACA_SLBSHADOWPTR(r13)
+	li	r12,0
+	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
+	li	r12,SLBSHADOW_STACKVSID
+	STDX_BE	r7,r12,r9			/* Save VSID */
+	li	r12,SLBSHADOW_STACKESID
+	STDX_BE	r0,r12,r9			/* Save ESID */
+
+	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
+	 * we have 1TB segments, the only CPUs known to have the errata
+	 * only support less than 1TB of system memory and we'll never
+	 * actually hit this code path.
+	 */
+
+	isync
+	slbie	r6
+BEGIN_FTR_SECTION
+	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+	slbmte	r7,r0
+	isync
+2:	blr
+	.size pin_stack_slb,.-pin_stack_slb
+#endif /* CONFIG_PPC_64S_HASH_MMU */
+
 #else
 #define FLUSH_COUNT_CACHE
 #endif /* CONFIG_PPC_BOOK3S_64 */
@@ -182,59 +240,12 @@ _GLOBAL(_switch)
 #endif
 
 	ld	r8,KSP(r4)	/* new stack pointer */
+
 #ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
-	b	2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
-BEGIN_FTR_SECTION
-	clrrdi	r6,r8,28	/* get its ESID */
-	clrrdi	r9,r1,28	/* get current sp ESID */
-FTR_SECTION_ELSE
-	clrrdi	r6,r8,40	/* get its 1T ESID */
-	clrrdi	r9,r1,40	/* get current sp 1T ESID */
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
-	clrldi.	r0,r6,2		/* is new ESID c00000000? */
-	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
-	cror	eq,4*cr1+eq,eq
-	beq	2f		/* if yes, don't slbie it */
-
-	/* Bolt in the new stack SLB entry */
-	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
-	oris	r0,r6,(SLB_ESID_V)@h
-	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
-BEGIN_FTR_SECTION
-	li	r9,MMU_SEGSIZE_1T	/* insert B field */
-	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
-	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-
-	/* Update the last bolted SLB.  No write barriers are needed
-	 * here, provided we only update the current CPU's SLB shadow
-	 * buffer.
-	 */
-	ld	r9,PACA_SLBSHADOWPTR(r13)
-	li	r12,0
-	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
-	li	r12,SLBSHADOW_STACKVSID
-	STDX_BE	r7,r12,r9			/* Save VSID */
-	li	r12,SLBSHADOW_STACKESID
-	STDX_BE	r0,r12,r9			/* Save ESID */
-
-	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
-	 * we have 1TB segments, the only CPUs known to have the errata
-	 * only support less than 1TB of system memory and we'll never
-	 * actually hit this code path.
-	 */
-
-	isync
-	slbie	r6
-BEGIN_FTR_SECTION
-	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
-	slbmte	r7,r0
-	isync
-2:
-#endif /* CONFIG_PPC_64S_HASH_MMU */
+	bl	pin_stack_slb
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
+#endif
 
 	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
 	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
-- 
2.40.1


* [PATCH v3 2/6] powerpc/64: Rearrange 64-bit _switch to prepare for 32/64 merge
From: Nicholas Piggin @ 2023-06-06 13:24 UTC
  To: linuxppc-dev; +Cc: Nicholas Piggin

Move some 64-bit specifics out of the function epilogue, rearrange it
to be a bit neater, use 32-bit memory ops for the CR save/restore (the
CR is a 32-bit register), and change some register numbers.

This is preparation to consolidate 32-bit and 64-bit switch code.
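
Concretely, the CR now round-trips through a stw/lwz pair rather than
std/ld, as in the hunks below:

	mfcr	r0
	stw	r0,_CCR(r1)
	...
	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0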

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kernel/entry_64.S | 38 ++++++++++++++++------------------
 1 file changed, 18 insertions(+), 20 deletions(-)

diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2e02834c5824..7430bd020a2a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -190,12 +190,13 @@ _GLOBAL(_switch)
 	mflr	r0
 	std	r0,16(r1)
 	stdu	r1,-SWITCH_FRAME_SIZE(r1)
+	std	r1,KSP(r3)	/* Set old stack pointer */
 	/* r3-r13 are caller saved -- Cort */
 	SAVE_NVGPRS(r1)
 	std	r0,_NIP(r1)	/* Return to switch caller */
-	mfcr	r23
-	std	r23,_CCR(r1)
-	std	r1,KSP(r3)	/* Set old stack pointer */
+	mfcr	r0
+	stw	r0,_CCR(r1)
+	ld	r8,KSP(r4)	/* Load new stack pointer */
 
 	kuap_check_amr r9, r10
 
@@ -232,14 +233,20 @@ _GLOBAL(_switch)
 	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
 #endif
 
-	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
-	std	r6,PACACURRENT(r13)	/* Set new 'current' */
+	addi	r3,r3,-THREAD	/* old thread -> task_struct for return value */
+	addi	r6,r4,-THREAD	/* new thread -> task_struct */
+	std	r6,PACACURRENT(r13)	/* Set new task_struct to 'current' */
 #if defined(CONFIG_STACKPROTECTOR)
 	ld	r6, TASK_CANARY(r6)
 	std	r6, PACA_CANARY(r13)
 #endif
-
-	ld	r8,KSP(r4)	/* new stack pointer */
+	/* Set the new PACAKSAVE */
+	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
+	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
+	   because we don't need to leave the 288-byte ABI gap at the
+	   top of the kernel stack. */
+	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
+	std	r7,PACAKSAVE(r13)
 
 #ifdef CONFIG_PPC_64S_HASH_MMU
 BEGIN_MMU_FTR_SECTION
@@ -247,12 +254,6 @@ BEGIN_MMU_FTR_SECTION
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 #endif
 
-	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
-	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
-	   because we don't need to leave the 288-byte ABI gap at the
-	   top of the kernel stack. */
-	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
-
 	/*
 	 * PMU interrupts in radix may come in here. They will use r1, not
 	 * PACAKSAVE, so this stack switch will not cause a problem. They
@@ -262,18 +263,15 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 	 * active on the new CPU, will order those stores.
 	 */
 	mr	r1,r8		/* start using new stack pointer */
-	std	r7,PACAKSAVE(r13)
 
-	ld	r6,_CCR(r1)
-	mtcrf	0xFF,r6
+	lwz	r0,_CCR(r1)
+	mtcrf	0xFF,r0
 
 	/* r3-r13 are destroyed -- Cort */
 	REST_NVGPRS(r1)
 
-	/* convert old thread to its task_struct for return value */
-	addi	r3,r3,-THREAD
-	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
-	mtlr	r7
+	ld	r0,_NIP(r1)	/* Return to _switch caller in new task */
+	mtlr	r0
 	addi	r1,r1,SWITCH_FRAME_SIZE
 	blr
 
-- 
2.40.1


* [PATCH v3 3/6] powerpc/32: Remove sync from _switch
From: Nicholas Piggin @ 2023-06-06 13:24 UTC
  To: linuxppc-dev; +Cc: Nicholas Piggin

The 64-bit code has not had a sync in _switch since commit
9145effd626d1 ("powerpc/64: Drop explicit hwsync in context switch").
The same logic applies to 32-bit. Remove the sync and replace it with a
placeholder comment (the 32-bit and 64-bit code will be merged by a
later change).
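
The ordering the sync provided comes from the scheduler core instead:
smp_mb__after_spinlock() is implemented as hwsync on powerpc, and
__schedule() executes it under the rq lock. A sketch of that existing
scheduler code (not part of this patch):

	/* kernel/sched/core.c: __schedule(), in effect */
	rq_lock(rq, &rf);
	smp_mb__after_spinlock();	/* hwsync on powerpc; orders MMIO too */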

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kernel/entry_32.S | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 47f0dd9a45ad..089432128571 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -243,13 +243,7 @@ _GLOBAL(_switch)
 	stw	r10,_CCR(r1)
 	stw	r1,KSP(r3)	/* Set old stack pointer */
 
-#ifdef CONFIG_SMP
-	/* We need a sync somewhere here to make sure that if the
-	 * previous task gets rescheduled on another CPU, it sees all
-	 * stores it has performed on this one.
-	 */
-	sync
-#endif /* CONFIG_SMP */
+	/* The sync for SMP migration is taken care of, see entry_64.S */
 
 	tophys(r0,r4)
 	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
-- 
2.40.1


* [PATCH v3 4/6] powerpc/32: Rearrange _switch to prepare for 32/64 merge
From: Nicholas Piggin @ 2023-06-06 13:24 UTC
  To: linuxppc-dev; +Cc: Nicholas Piggin

Change the order of some operations and some register numbers in
preparation for merging the 32-bit and 64-bit _switch code.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kernel/entry_32.S | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 089432128571..2d17b14bb9e5 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -236,12 +236,12 @@ _GLOBAL(_switch)
 	stwu	r1,-SWITCH_FRAME_SIZE(r1)
 	mflr	r0
 	stw	r0,SWITCH_FRAME_SIZE+4(r1)
+	stw	r1,KSP(r3)	/* Set old stack pointer */
 	/* r3-r12 are caller saved -- Cort */
 	SAVE_NVGPRS(r1)
 	stw	r0,_NIP(r1)	/* Return to switch caller */
-	mfcr	r10
-	stw	r10,_CCR(r1)
-	stw	r1,KSP(r3)	/* Set old stack pointer */
+	mfcr	r0
+	stw	r0,_CCR(r1)
 
 	/* The sync for SMP migration is taken care of, see entry_64.S */
 
@@ -258,8 +258,8 @@ _GLOBAL(_switch)
 	/* r3-r12 are destroyed -- Cort */
 	REST_NVGPRS(r1)
 
-	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
-	mtlr	r4
+	lwz	r0,_NIP(r1)	/* Return to _switch caller in new task */
+	mtlr	r0
 	addi	r1,r1,SWITCH_FRAME_SIZE
 	blr
 
-- 
2.40.1


* [PATCH v3 5/6] powerpc: merge 32-bit and 64-bit _switch implementation
From: Nicholas Piggin @ 2023-06-06 13:24 UTC
  To: linuxppc-dev; +Cc: Nicholas Piggin

The 32-bit and 64-bit _switch stack frame setup is substantially the
same, as are the comments. The differences in how the stack and current
are switched, and in the other hardware and software housekeeping, are
moved into macros.

Generated code should be unchanged.
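
For reference, PPC_CREATE_STACK_FRAME(size) expands per the ppc_asm.h
hunk below; the two prologue orders differ, which is exactly what the
macro abstracts:

  64-bit:
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-(size)(r1)

  32-bit:
	stwu	r1,-(size)(r1)
	mflr	r0
	stw	r0,(size+4)(r1)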

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/include/asm/ppc_asm.h |  14 ++
 arch/powerpc/kernel/Makefile       |   2 +-
 arch/powerpc/kernel/entry_32.S     |  52 ------
 arch/powerpc/kernel/entry_64.S     | 229 -------------------------
 arch/powerpc/kernel/switch.S       | 258 +++++++++++++++++++++++++++++
 5 files changed, 273 insertions(+), 282 deletions(-)
 create mode 100644 arch/powerpc/kernel/switch.S

diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 5f05a984b103..e7792aa13510 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -406,6 +406,15 @@ GLUE(.,name):
 /* offsets for stack frame layout */
 #define LRSAVE	16
 
+/*
+ * GCC stack frames follow a different pattern on 32 vs 64. This can be used
+ * to make asm frames be consistent with C.
+ */
+#define PPC_CREATE_STACK_FRAME(size)			\
+	mflr		r0;				\
+	std		r0,16(r1);			\
+	stdu		r1,-(size)(r1)
+
 #else /* 32-bit */
 
 #define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE_32 reg, expr
@@ -422,6 +431,11 @@ GLUE(.,name):
 /* offsets for stack frame layout */
 #define LRSAVE	4
 
+#define PPC_CREATE_STACK_FRAME(size)			\
+	stwu		r1,-(size)(r1);			\
+	mflr		r0;				\
+	stw		r0,(size+4)(r1)
+
 #endif
 
 /* various errata or part fixups */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 9bf2be123093..ec70a1748506 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -68,7 +68,7 @@ CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong
 CFLAGS_syscall.o += -fno-stack-protector
 #endif
 
-obj-y				:= cputable.o syscalls.o \
+obj-y				:= cputable.o syscalls.o switch.o \
 				   irq.o align.o signal_$(BITS).o pmc.o vdso.o \
 				   process.o systbl.o idle.o \
 				   signal.o sysfs.o cacheinfo.o time.o \
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 2d17b14bb9e5..fe27d41f9a3d 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -211,58 +211,6 @@ start_kernel_thread:
 100:	trap
 	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
 
-
-/*
- * This routine switches between two different tasks.  The process
- * state of one is saved on its kernel stack.  Then the state
- * of the other is restored from its kernel stack.  The memory
- * management hardware is updated to the second process's state.
- * Finally, we can return to the second process.
- * On entry, r3 points to the THREAD for the current task, r4
- * points to the THREAD for the new task.
- *
- * This routine is always called with interrupts disabled.
- *
- * Note: there are two ways to get to the "going out" portion
- * of this code; either by coming in via the entry (_switch)
- * or via "fork" which must set up an environment equivalent
- * to the "_switch" path.  If you change this , you'll have to
- * change the fork code also.
- *
- * The code which creates the new task context is in 'copy_thread'
- * in arch/ppc/kernel/process.c
- */
-_GLOBAL(_switch)
-	stwu	r1,-SWITCH_FRAME_SIZE(r1)
-	mflr	r0
-	stw	r0,SWITCH_FRAME_SIZE+4(r1)
-	stw	r1,KSP(r3)	/* Set old stack pointer */
-	/* r3-r12 are caller saved -- Cort */
-	SAVE_NVGPRS(r1)
-	stw	r0,_NIP(r1)	/* Return to switch caller */
-	mfcr	r0
-	stw	r0,_CCR(r1)
-
-	/* The sync for SMP migration is taken care of, see entry_64.S */
-
-	tophys(r0,r4)
-	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
-	lwz	r1,KSP(r4)	/* Load new stack pointer */
-
-	/* save the old current 'last' for return value */
-	mr	r3,r2
-	addi	r2,r4,-THREAD	/* Update current */
-
-	lwz	r0,_CCR(r1)
-	mtcrf	0xFF,r0
-	/* r3-r12 are destroyed -- Cort */
-	REST_NVGPRS(r1)
-
-	lwz	r0,_NIP(r1)	/* Return to _switch caller in new task */
-	mtlr	r0
-	addi	r1,r1,SWITCH_FRAME_SIZE
-	blr
-
 	.globl	fast_exception_return
 fast_exception_return:
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 7430bd020a2a..f3d3885ee9fd 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -14,7 +14,6 @@
  *  code, and exception/interrupt return code for PowerPC.
  */
 
-#include <linux/objtool.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <asm/cache.h>
@@ -45,236 +44,8 @@
 #include <asm/feature-fixups.h>
 #include <asm/kup.h>
 
-/*
- * System calls.
- */
 	.section	".text"
 
-#ifdef CONFIG_PPC_BOOK3S_64
-
-#define FLUSH_COUNT_CACHE	\
-1:	nop;			\
-	patch_site 1b, patch__call_flush_branch_caches1; \
-1:	nop;			\
-	patch_site 1b, patch__call_flush_branch_caches2; \
-1:	nop;			\
-	patch_site 1b, patch__call_flush_branch_caches3
-
-.macro nops number
-	.rept \number
-	nop
-	.endr
-.endm
-
-.balign 32
-.global flush_branch_caches
-flush_branch_caches:
-	/* Save LR into r9 */
-	mflr	r9
-
-	// Flush the link stack
-	.rept 64
-	ANNOTATE_INTRA_FUNCTION_CALL
-	bl	.+4
-	.endr
-	b	1f
-	nops	6
-
-	.balign 32
-	/* Restore LR */
-1:	mtlr	r9
-
-	// If we're just flushing the link stack, return here
-3:	nop
-	patch_site 3b patch__flush_link_stack_return
-
-	li	r9,0x7fff
-	mtctr	r9
-
-	PPC_BCCTR_FLUSH
-
-2:	nop
-	patch_site 2b patch__flush_count_cache_return
-
-	nops	3
-
-	.rept 278
-	.balign 32
-	PPC_BCCTR_FLUSH
-	nops	7
-	.endr
-
-	blr
-
-#ifdef CONFIG_PPC_64S_HASH_MMU
-.balign 32
-/*
- * New stack pointer in r8, old stack pointer in r1, must not clobber r3
- */
-pin_stack_slb:
-BEGIN_FTR_SECTION
-	clrrdi	r6,r8,28	/* get its ESID */
-	clrrdi	r9,r1,28	/* get current sp ESID */
-FTR_SECTION_ELSE
-	clrrdi	r6,r8,40	/* get its 1T ESID */
-	clrrdi	r9,r1,40	/* get current sp 1T ESID */
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
-	clrldi.	r0,r6,2		/* is new ESID c00000000? */
-	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
-	cror	eq,4*cr1+eq,eq
-	beq	2f		/* if yes, don't slbie it */
-
-	/* Bolt in the new stack SLB entry */
-	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
-	oris	r0,r6,(SLB_ESID_V)@h
-	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
-BEGIN_FTR_SECTION
-	li	r9,MMU_SEGSIZE_1T	/* insert B field */
-	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
-	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-
-	/* Update the last bolted SLB.  No write barriers are needed
-	 * here, provided we only update the current CPU's SLB shadow
-	 * buffer.
-	 */
-	ld	r9,PACA_SLBSHADOWPTR(r13)
-	li	r12,0
-	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
-	li	r12,SLBSHADOW_STACKVSID
-	STDX_BE	r7,r12,r9			/* Save VSID */
-	li	r12,SLBSHADOW_STACKESID
-	STDX_BE	r0,r12,r9			/* Save ESID */
-
-	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
-	 * we have 1TB segments, the only CPUs known to have the errata
-	 * only support less than 1TB of system memory and we'll never
-	 * actually hit this code path.
-	 */
-
-	isync
-	slbie	r6
-BEGIN_FTR_SECTION
-	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
-	slbmte	r7,r0
-	isync
-2:	blr
-	.size pin_stack_slb,.-pin_stack_slb
-#endif /* CONFIG_PPC_64S_HASH_MMU */
-
-#else
-#define FLUSH_COUNT_CACHE
-#endif /* CONFIG_PPC_BOOK3S_64 */
-
-/*
- * This routine switches between two different tasks.  The process
- * state of one is saved on its kernel stack.  Then the state
- * of the other is restored from its kernel stack.  The memory
- * management hardware is updated to the second process's state.
- * Finally, we can return to the second process, via interrupt_return.
- * On entry, r3 points to the THREAD for the current task, r4
- * points to the THREAD for the new task.
- *
- * Note: there are two ways to get to the "going out" portion
- * of this code; either by coming in via the entry (_switch)
- * or via "fork" which must set up an environment equivalent
- * to the "_switch" path.  If you change this you'll have to change
- * the fork code also.
- *
- * The code which creates the new task context is in 'copy_thread'
- * in arch/powerpc/kernel/process.c 
- */
-	.align	7
-_GLOBAL(_switch)
-	mflr	r0
-	std	r0,16(r1)
-	stdu	r1,-SWITCH_FRAME_SIZE(r1)
-	std	r1,KSP(r3)	/* Set old stack pointer */
-	/* r3-r13 are caller saved -- Cort */
-	SAVE_NVGPRS(r1)
-	std	r0,_NIP(r1)	/* Return to switch caller */
-	mfcr	r0
-	stw	r0,_CCR(r1)
-	ld	r8,KSP(r4)	/* Load new stack pointer */
-
-	kuap_check_amr r9, r10
-
-	FLUSH_COUNT_CACHE	/* Clobbers r9, ctr */
-
-	/*
-	 * On SMP kernels, care must be taken because a task may be
-	 * scheduled off CPUx and on to CPUy. Memory ordering must be
-	 * considered.
-	 *
-	 * Cacheable stores on CPUx will be visible when the task is
-	 * scheduled on CPUy by virtue of the core scheduler barriers
-	 * (see "Notes on Program-Order guarantees on SMP systems." in
-	 * kernel/sched/core.c).
-	 *
-	 * Uncacheable stores in the case of involuntary preemption must
-	 * be taken care of. The smp_mb__after_spinlock() in __schedule()
-	 * is implemented as hwsync on powerpc, which orders MMIO too. So
-	 * long as there is an hwsync in the context switch path, it will
-	 * be executed on the source CPU after the task has performed
-	 * all MMIO ops on that CPU, and on the destination CPU before the
-	 * task performs any MMIO ops there.
-	 */
-
-	/*
-	 * The kernel context switch path must contain a spin_lock,
-	 * which contains larx/stcx, which will clear any reservation
-	 * of the task being switched.
-	 */
-#ifdef CONFIG_PPC_BOOK3S
-/* Cancel all explict user streams as they will have no use after context
- * switch and will stop the HW from creating streams itself
- */
-	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
-#endif
-
-	addi	r3,r3,-THREAD	/* old thread -> task_struct for return value */
-	addi	r6,r4,-THREAD	/* new thread -> task_struct */
-	std	r6,PACACURRENT(r13)	/* Set new task_struct to 'current' */
-#if defined(CONFIG_STACKPROTECTOR)
-	ld	r6, TASK_CANARY(r6)
-	std	r6, PACA_CANARY(r13)
-#endif
-	/* Set the new PACAKSAVE */
-	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
-	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
-	   because we don't need to leave the 288-byte ABI gap at the
-	   top of the kernel stack. */
-	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
-	std	r7,PACAKSAVE(r13)
-
-#ifdef CONFIG_PPC_64S_HASH_MMU
-BEGIN_MMU_FTR_SECTION
-	bl	pin_stack_slb
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
-#endif
-
-	/*
-	 * PMU interrupts in radix may come in here. They will use r1, not
-	 * PACAKSAVE, so this stack switch will not cause a problem. They
-	 * will store to the process stack, which may then be migrated to
-	 * another CPU. However the rq lock release on this CPU paired with
-	 * the rq lock acquire on the new CPU before the stack becomes
-	 * active on the new CPU, will order those stores.
-	 */
-	mr	r1,r8		/* start using new stack pointer */
-
-	lwz	r0,_CCR(r1)
-	mtcrf	0xFF,r0
-
-	/* r3-r13 are destroyed -- Cort */
-	REST_NVGPRS(r1)
-
-	ld	r0,_NIP(r1)	/* Return to _switch caller in new task */
-	mtlr	r0
-	addi	r1,r1,SWITCH_FRAME_SIZE
-	blr
-
 _GLOBAL(enter_prom)
 	mflr	r0
 	std	r0,16(r1)
diff --git a/arch/powerpc/kernel/switch.S b/arch/powerpc/kernel/switch.S
new file mode 100644
index 000000000000..150002d7729a
--- /dev/null
+++ b/arch/powerpc/kernel/switch.S
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include <linux/objtool.h>
+#include <asm/asm-offsets.h>
+#include <asm/code-patching-asm.h>
+#include <asm/kup.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/thread_info.h>
+
+.section ".text","ax",@progbits
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * Cancel all explicit user streams as they will have no use after context
+ * switch and will stop the HW from creating streams itself
+ */
+#define STOP_STREAMS		\
+	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
+
+#define FLUSH_COUNT_CACHE	\
+1:	nop;			\
+	patch_site 1b, patch__call_flush_branch_caches1; \
+1:	nop;			\
+	patch_site 1b, patch__call_flush_branch_caches2; \
+1:	nop;			\
+	patch_site 1b, patch__call_flush_branch_caches3
+
+.macro nops number
+	.rept \number
+	nop
+	.endr
+.endm
+
+.balign 32
+.global flush_branch_caches
+flush_branch_caches:
+	/* Save LR into r9 */
+	mflr	r9
+
+	// Flush the link stack
+	.rept 64
+	ANNOTATE_INTRA_FUNCTION_CALL
+	bl	.+4
+	.endr
+	b	1f
+	nops	6
+
+	.balign 32
+	/* Restore LR */
+1:	mtlr	r9
+
+	// If we're just flushing the link stack, return here
+3:	nop
+	patch_site 3b patch__flush_link_stack_return
+
+	li	r9,0x7fff
+	mtctr	r9
+
+	PPC_BCCTR_FLUSH
+
+2:	nop
+	patch_site 2b patch__flush_count_cache_return
+
+	nops	3
+
+	.rept 278
+	.balign 32
+	PPC_BCCTR_FLUSH
+	nops	7
+	.endr
+
+	blr
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+.balign 32
+/*
+ * New stack pointer in r8, old stack pointer in r1, must not clobber r3
+ */
+pin_stack_slb:
+BEGIN_FTR_SECTION
+	clrrdi	r6,r8,28	/* get its ESID */
+	clrrdi	r9,r1,28	/* get current sp ESID */
+FTR_SECTION_ELSE
+	clrrdi	r6,r8,40	/* get its 1T ESID */
+	clrrdi	r9,r1,40	/* get current sp 1T ESID */
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
+	clrldi.	r0,r6,2		/* is new ESID c00000000? */
+	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
+	cror	eq,4*cr1+eq,eq
+	beq	2f		/* if yes, don't slbie it */
+
+	/* Bolt in the new stack SLB entry */
+	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
+	oris	r0,r6,(SLB_ESID_V)@h
+	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
+BEGIN_FTR_SECTION
+	li	r9,MMU_SEGSIZE_1T	/* insert B field */
+	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
+	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
+
+	/* Update the last bolted SLB.  No write barriers are needed
+	 * here, provided we only update the current CPU's SLB shadow
+	 * buffer.
+	 */
+	ld	r9,PACA_SLBSHADOWPTR(r13)
+	li	r12,0
+	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
+	li	r12,SLBSHADOW_STACKVSID
+	STDX_BE	r7,r12,r9			/* Save VSID */
+	li	r12,SLBSHADOW_STACKESID
+	STDX_BE	r0,r12,r9			/* Save ESID */
+
+	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
+	 * we have 1TB segments, the only CPUs known to have the errata
+	 * only support less than 1TB of system memory and we'll never
+	 * actually hit this code path.
+	 */
+
+	isync
+	slbie	r6
+BEGIN_FTR_SECTION
+	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+	slbmte	r7,r0
+	isync
+2:	blr
+	.size pin_stack_slb,.-pin_stack_slb
+#endif /* CONFIG_PPC_64S_HASH_MMU */
+
+#else
+#define STOP_STREAMS
+#define FLUSH_COUNT_CACHE
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+/*
+ * do_switch_32/64 have the same calling convention as _switch, i.e., r3,r4
+ * are prev and next thread_struct *, and return prev task_struct * in r3.
+ *
+ * This switches the stack, current, and does other task switch housekeeping.
+ */
+.macro do_switch_32
+	tophys(r0,r4)
+	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
+	lwz	r1,KSP(r4)	/* Load new stack pointer */
+
+	/* save the old current 'last' for return value */
+	mr	r3,r2
+	addi	r2,r4,-THREAD	/* Update current */
+.endm
+
+.macro do_switch_64
+	ld	r8,KSP(r4)	/* Load new stack pointer */
+
+	kuap_check_amr r9, r10
+
+	FLUSH_COUNT_CACHE	/* Clobbers r9, ctr */
+
+	STOP_STREAMS		/* Clobbers r6 */
+
+	addi	r3,r3,-THREAD	/* old thread -> task_struct for return value */
+	addi	r6,r4,-THREAD	/* new thread -> task_struct */
+	std	r6,PACACURRENT(r13)	/* Set new task_struct to 'current' */
+#if defined(CONFIG_STACKPROTECTOR)
+	ld	r6, TASK_CANARY(r6)
+	std	r6, PACA_CANARY(r13)
+#endif
+	/* Set new PACAKSAVE */
+	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
+	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
+	std	r7,PACAKSAVE(r13)
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+BEGIN_MMU_FTR_SECTION
+	bl	pin_stack_slb
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
+#endif
+	/*
+	 * PMU interrupts in radix may come in here. They will use r1, not
+	 * PACAKSAVE, so this stack switch will not cause a problem. They
+	 * will store to the process stack, which may then be migrated to
+	 * another CPU. However the rq lock release on this CPU paired with
+	 * the rq lock acquire on the new CPU before the stack becomes
+	 * active on the new CPU, will order those stores.
+	 */
+	mr	r1,r8		/* start using new stack pointer */
+.endm
+
+/*
+ * This routine switches between two different tasks.  The process
+ * state of one is saved on its kernel stack.  Then the state
+ * of the other is restored from its kernel stack.  The memory
+ * management hardware is updated to the second process's state.
+ * Finally, we can return to the second process.
+ * On entry, r3 points to the THREAD for the current task, r4
+ * points to the THREAD for the new task.
+ *
+ * This routine is always called with interrupts disabled.
+ *
+ * Note: there are two ways to get to the "going out" portion
+ * of this code; either by coming in via the entry (_switch)
+ * or via "fork" which must set up an environment equivalent
+ * to the "_switch" path.  If you change this, you'll have to
+ * change the fork code also.
+ *
+ * The code which creates the new task context is in 'copy_thread'
+ * in arch/powerpc/kernel/process.c
+ *
+ * Note: this uses SWITCH_FRAME_SIZE rather than USER_INT_FRAME_SIZE
+ * because we don't need to leave the redzone ABI gap at the top of
+ * the kernel stack.
+ */
+_GLOBAL(_switch)
+	PPC_CREATE_STACK_FRAME(SWITCH_FRAME_SIZE)
+	PPC_STL		r1,KSP(r3)	/* Set old stack pointer */
+	SAVE_NVGPRS(r1)			/* volatiles are caller-saved -- Cort */
+	PPC_STL		r0,_NIP(r1)	/* Return to switch caller */
+	mfcr		r0
+	stw		r0,_CCR(r1)
+
+	/*
+	 * On SMP kernels, care must be taken because a task may be
+	 * scheduled off CPUx and on to CPUy. Memory ordering must be
+	 * considered.
+	 *
+	 * Cacheable stores on CPUx will be visible when the task is
+	 * scheduled on CPUy by virtue of the core scheduler barriers
+	 * (see "Notes on Program-Order guarantees on SMP systems." in
+	 * kernel/sched/core.c).
+	 *
+	 * Uncacheable stores in the case of involuntary preemption must
+	 * be taken care of. The smp_mb__after_spinlock() in __schedule()
+	 * is implemented as hwsync on powerpc, which orders MMIO too. So
+	 * long as there is an hwsync in the context switch path, it will
+	 * be executed on the source CPU after the task has performed
+	 * all MMIO ops on that CPU, and on the destination CPU before the
+	 * task performs any MMIO ops there.
+	 */
+
+	/*
+	 * The kernel context switch path must contain a spin_lock,
+	 * which contains larx/stcx, which will clear any reservation
+	 * of the task being switched.
+	 */
+
+#ifdef CONFIG_PPC32
+	do_switch_32
+#else
+	do_switch_64
+#endif
+
+	lwz	r0,_CCR(r1)
+	mtcrf	0xFF,r0
+	REST_NVGPRS(r1)		/* volatiles are destroyed -- Cort */
+	PPC_LL	r0,_NIP(r1)	/* Return to _switch caller in new task */
+	mtlr	r0
+	addi	r1,r1,SWITCH_FRAME_SIZE
+	blr
-- 
2.40.1


* [PATCH v3 6/6] powerpc/64: Rename entry_64.S to prom_entry_64.S
From: Nicholas Piggin @ 2023-06-06 13:24 UTC
  To: linuxppc-dev; +Cc: Nicholas Piggin

This file contains only the enter_prom implementation now.
Trim includes and update header comment while we're here.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kernel/Makefile                  | 12 ++++----
 .../kernel/{entry_64.S => prom_entry_64.S}    | 30 ++-----------------
 scripts/head-object-list.txt                  |  2 +-
 3 files changed, 10 insertions(+), 34 deletions(-)
 rename arch/powerpc/kernel/{entry_64.S => prom_entry_64.S} (73%)

diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ec70a1748506..2919433be355 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -165,9 +165,6 @@ endif
 
 obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM)	+= tm.o
 
-obj-$(CONFIG_PPC64)		+= $(obj64-y)
-obj-$(CONFIG_PPC32)		+= $(obj32-y)
-
 ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC_CORE)$(CONFIG_PPC_BOOK3S),)
 obj-y				+= ppc_save_regs.o
 endif
@@ -209,10 +206,13 @@ CFLAGS_paca.o			+= -fno-stack-protector
 
 obj-$(CONFIG_PPC_FPU)		+= fpu.o
 obj-$(CONFIG_ALTIVEC)		+= vector.o
-obj-$(CONFIG_PPC64)		+= entry_64.o
-obj-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE)	+= prom_init.o
 
-extra-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE)	+= prom_init_check
+obj-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init.o
+obj64-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_entry_64.o
+extra-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init_check
+
+obj-$(CONFIG_PPC64)		+= $(obj64-y)
+obj-$(CONFIG_PPC32)		+= $(obj32-y)
 
 quiet_cmd_prom_init_check = PROMCHK $@
       cmd_prom_init_check = $(CONFIG_SHELL) $< "$(NM)" $(obj)/prom_init.o; touch $@
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/prom_entry_64.S
similarity index 73%
rename from arch/powerpc/kernel/entry_64.S
rename to arch/powerpc/kernel/prom_entry_64.S
index f3d3885ee9fd..f1b8793d28c6 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/prom_entry_64.S
@@ -10,41 +10,17 @@
  *    Copyright (C) 1996 Paul Mackerras.
  *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
  *
- *  This file contains the system call entry code, context switch
- *  code, and exception/interrupt return code for PowerPC.
+ *  This file contains the 64-bit prom entry code.
  */
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <asm/cache.h>
-#include <asm/unistd.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/mmu.h>
-#include <asm/thread_info.h>
-#include <asm/code-patching-asm.h>
-#include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
-#include <asm/cputable.h>
-#include <asm/firmware.h>
-#include <asm/bug.h>
-#include <asm/ptrace.h>
-#include <asm/irqflags.h>
-#include <asm/hw_irq.h>
-#include <asm/context_tracking.h>
-#include <asm/ppc-opcode.h>
-#include <asm/barrier.h>
-#include <asm/export.h>
-#include <asm/asm-compat.h>
 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/exception-64s.h>
 #else
 #include <asm/exception-64e.h>
 #endif
-#include <asm/feature-fixups.h>
-#include <asm/kup.h>
+#include <asm/ppc_asm.h>
 
-	.section	".text"
+.section ".text","ax",@progbits
 
 _GLOBAL(enter_prom)
 	mflr	r0
diff --git a/scripts/head-object-list.txt b/scripts/head-object-list.txt
index b2a0e21ea8d7..26359968744e 100644
--- a/scripts/head-object-list.txt
+++ b/scripts/head-object-list.txt
@@ -34,7 +34,7 @@ arch/powerpc/kernel/head_64.o
 arch/powerpc/kernel/head_8xx.o
 arch/powerpc/kernel/head_85xx.o
 arch/powerpc/kernel/head_book3s_32.o
-arch/powerpc/kernel/entry_64.o
+arch/powerpc/kernel/prom_entry_64.o
 arch/powerpc/kernel/fpu.o
 arch/powerpc/kernel/vector.o
 arch/powerpc/kernel/prom_init.o
-- 
2.40.1


* Re: [PATCH v3 0/6] powerpc: merge _switch in 32/64
From: Michael Ellerman @ 2023-07-03  5:26 UTC
  To: linuxppc-dev, Nicholas Piggin

On Tue, 06 Jun 2023 23:24:41 +1000, Nicholas Piggin wrote:
> Since v2:
> - Add PPC_CREATE_STACK_FRAME() to abstract prologue differences.
> - Build fix.
> - Makefile tidy [Christophe]
> - Fix a missing SOB.
> 
> Since v1:
> - Don't re-order 32-bit prologue.
> - Improve Kconfig conditional includes.
> - Break out code changes into their own patches before merging,
>   so merge patch leaves generated code unchanged.
> - Change prom_entry.S to prom_entry_64.S.
> 
> [...]

Applied to powerpc/next.

[1/6] powerpc/64s: move stack SLB pinning out of line from _switch
      https://git.kernel.org/powerpc/c/d6b87c3eb6b2e0b34ba747df549e08768b019fe9
[2/6] powerpc/64: Rearrange 64-bit _switch to prepare for 32/64 merge
      https://git.kernel.org/powerpc/c/0eb8088b5a7524f96cadfb27083f5bdd819d9d52
[3/6] powerpc/32: Remove sync from _switch
      https://git.kernel.org/powerpc/c/fc8562c9b69af9533c39903b1601c378742189b0
[4/6] powerpc/32: Rearrange _switch to prepare for 32/64 merge
      https://git.kernel.org/powerpc/c/6958ad05d5789a303afe4fa4495df43993d9b7cb
[5/6] powerpc: merge 32-bit and 64-bit _switch implementation
      https://git.kernel.org/powerpc/c/afc6386815a88d067d9f567dcc6266800286f626
[6/6] powerpc/64: Rename entry_64.S to prom_entry_64.S
      https://git.kernel.org/powerpc/c/27be2456332dcd69907f086cda327ad923b222cf

cheers
