linuxppc-dev.lists.ozlabs.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/2] Fix 2 "[v3, 25/32] powerpc/64: system call implement entry/exit logic in C"
@ 2020-03-25  9:30 Nicholas Piggin
  2020-03-25  9:30 ` [PATCH 2/2] Fix 3 "[v3, 28/32] powerpc/64s: interrupt implement exit " Nicholas Piggin
  0 siblings, 1 reply; 2+ messages in thread
From: Nicholas Piggin @ 2020-03-25  9:30 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Piggin

This fixes 4 issues caught by TM selftests. First was a tm-syscall bug
that hit due to tabort_syscall being called after interrupts were reconciled
(in a subsequent patch), which led to interrupts being enabled before
tabort_syscall was called. Rather than un-reconciling interrupts for the
return path, I just go back to putting the test early in asm; the
C-ification of that wasn't a big win anyway.

Second is the syscall return _TIF_USER_WORK_MASK check would go into an
infinite loop if _TIF_RESTORE_TM became set. The asm code uses
_TIF_USER_WORK_MASK to branch to slowpath which includes restore_tm_state.

Third is system call return was not calling restore_tm_state, I missed
this completely (although it's in the return from interrupt C conversion)
because when the asm syscall code encountered problems it would branch
to the interrupt return code.

Fourth is MSR_VEC missing from restore_math, which was caught by
tm-unavailable selftest taking an unexpected facility unavailable
interrupt when testing VSX unavailable exception with MSR.FP=1 MSR.VEC=1.
Fourth case also has a fixup in a subsequent patch.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kernel/entry_64.S   | 12 +++++++++---
 arch/powerpc/kernel/syscall_64.c | 25 +++++++++++++------------
 2 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index dc7fd3196d20..403224acdaa8 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -64,6 +64,12 @@ exception_marker:
 
 	.globl system_call_common
 system_call_common:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
+	bne	.Ltabort_syscall
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
 _ASM_NOKPROBE_SYMBOL(system_call_common)
 	mr	r10,r1
 	ld	r1,PACAKSAVE(r13)
@@ -179,7 +185,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	b	.Lsyscall_restore_regs_cont
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-_GLOBAL(tabort_syscall) /* (unsigned long nip, unsigned long msr) */
+.Ltabort_syscall:
 	/* Firstly we need to enable TM in the kernel */
 	mfmsr	r10
 	li	r9, 1
@@ -199,8 +205,8 @@ _GLOBAL(tabort_syscall) /* (unsigned long nip, unsigned long msr) */
 	li	r9, MSR_RI
 	andc	r10, r10, r9
 	mtmsrd	r10, 1
-	mtspr	SPRN_SRR0, r3
-	mtspr	SPRN_SRR1, r4
+	mtspr	SPRN_SRR0, r11
+	mtspr	SPRN_SRR1, r12
 	RFI_TO_USER
 	b	.	/* prevent speculative execution */
 #endif
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
index ffd601d87065..56533a26f3b7 100644
--- a/arch/powerpc/kernel/syscall_64.c
+++ b/arch/powerpc/kernel/syscall_64.c
@@ -15,8 +15,6 @@
 #include <asm/time.h>
 #include <asm/unistd.h>
 
-extern void __noreturn tabort_syscall(unsigned long nip, unsigned long msr);
-
 typedef long (*syscall_fn)(long, long, long, long, long, long);
 
 /* Has to run notrace because it is entered "unreconciled" */
@@ -32,10 +30,6 @@ notrace long system_call_exception(long r3, long r4, long r5, long r6, long r7,
 	BUG_ON(!FULL_REGS(regs));
 	BUG_ON(regs->softe != IRQS_ENABLED);
 
-	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
-	    unlikely(regs->msr & MSR_TS_T))
-		tabort_syscall(regs->nip, regs->msr);
-
 	account_cpu_user_entry();
 
 #ifdef CONFIG_PPC_SPLPAR
@@ -161,7 +155,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 again:
 	local_irq_disable();
 	ti_flags = READ_ONCE(*ti_flagsp);
-	while (unlikely(ti_flags & _TIF_USER_WORK_MASK)) {
+	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
 		local_irq_enable();
 		if (ti_flags & _TIF_NEED_RESCHED) {
 			schedule();
@@ -180,13 +174,20 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 	}
 
 	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
-		unsigned long mathflags = MSR_FP;
+		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+				unlikely((ti_flags & _TIF_RESTORE_TM))) {
+			restore_tm_state(regs);
+		} else {
+			unsigned long mathflags = MSR_FP;
 
-		if (IS_ENABLED(CONFIG_ALTIVEC))
-			mathflags |= MSR_VEC;
+			if (cpu_has_feature(CPU_FTR_VSX))
+				mathflags |= MSR_VEC | MSR_VSX;
+			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
+				mathflags |= MSR_VEC;
 
-		if ((regs->msr & mathflags) != mathflags)
-			restore_math(regs);
+			if ((regs->msr & mathflags) != mathflags)
+				restore_math(regs);
+		}
 	}
 
 	/* This must be done with RI=1 because tracing may touch vmaps */
-- 
2.23.0


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* [PATCH 2/2] Fix 3 "[v3, 28/32] powerpc/64s: interrupt implement exit logic in C"
  2020-03-25  9:30 [PATCH 1/2] Fix 2 "[v3, 25/32] powerpc/64: system call implement entry/exit logic in C" Nicholas Piggin
@ 2020-03-25  9:30 ` Nicholas Piggin
  0 siblings, 0 replies; 2+ messages in thread
From: Nicholas Piggin @ 2020-03-25  9:30 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Piggin

This fixes the interrupt-return part of the MSR_VSX restore bug caught
by tm-unavailable selftest.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kernel/syscall_64.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
index 56533a26f3b7..a2995909b83b 100644
--- a/arch/powerpc/kernel/syscall_64.c
+++ b/arch/powerpc/kernel/syscall_64.c
@@ -251,19 +251,21 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 		ti_flags = READ_ONCE(*ti_flagsp);
 	}
 
-	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
-		unsigned long mathflags = 0;
-
-		if (IS_ENABLED(CONFIG_PPC_FPU))
-			mathflags |= MSR_FP;
-		if (IS_ENABLED(CONFIG_ALTIVEC))
-			mathflags |= MSR_VEC;
-
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
 		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
-						(ti_flags & _TIF_RESTORE_TM))
+				unlikely((ti_flags & _TIF_RESTORE_TM))) {
 			restore_tm_state(regs);
-		else if ((regs->msr & mathflags) != mathflags)
-			restore_math(regs);
+		} else {
+			unsigned long mathflags = MSR_FP;
+
+			if (cpu_has_feature(CPU_FTR_VSX))
+				mathflags |= MSR_VEC | MSR_VSX;
+			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
+				mathflags |= MSR_VEC;
+
+			if ((regs->msr & mathflags) != mathflags)
+				restore_math(regs);
+		}
 	}
 
 	trace_hardirqs_on();
-- 
2.23.0


^ permalink raw reply related	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2020-03-25  9:34 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-03-25  9:30 [PATCH 1/2] Fix 2 "[v3, 25/32] powerpc/64: system call implement entry/exit logic in C" Nicholas Piggin
2020-03-25  9:30 ` [PATCH 2/2] Fix 3 "[v3, 28/32] powerpc/64s: interrupt implement exit " Nicholas Piggin

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).