linux-parisc.vger.kernel.org archive mirror
* [PATCH] parisc: Various spin lock optimizations
@ 2020-07-19 18:41 John David Anglin
  2020-07-20 13:18 ` John David Anglin
  0 siblings, 1 reply; 2+ messages in thread
From: John David Anglin @ 2020-07-19 18:41 UTC (permalink / raw)
  To: linux-parisc; +Cc: Helge Deller, James Bottomley

While investigating the stall problem, I looked closely at our spin lock implementation and found
a number of minor issues.

Regarding arch_spin_is_locked(), I wasn't convinced that the barrier was correct, so I switched the
code to use READ_ONCE.

Regarding arch_spin_lock(), cpu_relax() slightly pessimizes the loop code generated by gcc (on parisc it is just
a compiler barrier, which makes gcc discard its cached values on every iteration).  The pointer "a" is volatile,
so we can just use continue.

Regarding arch_spin_lock_flags(), I went back to the old approach of just toggling interrupts on and off in the
wait loop.  It's rather dangerous to let the routine set all the PSW flag bits, and weird things happen if the
local_save_flags() call is moved.

Regarding arch_spin_unlock(), I defined a new barrier, __ldcw_mb().  It optimizes to a nop when an SMP
kernel is used on a non-SMP machine.
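
Roughly, the unlock path then looks like this (an illustrative sketch only; the real change is in the hunk
below):

	static inline void arch_spin_unlock(arch_spinlock_t *x)
	{
		volatile unsigned int *a = __ldcw_align(x);

		__ldcw_mb(a);	/* dummy LDCW on SMP, patched to a nop on UP */
		*a = 1;		/* the plain store can't move above the barrier */
	}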

Finally regarding arch_spin_trylock(), I just shortened the C code.

Signed-off-by: Dave Anglin <dave.anglin@bell.net>
---

diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index e080143e79a3..0b182450a2fb 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -51,6 +51,19 @@
 	__ret;							\
 })

+/* LDCW is strongly ordered and can be used as a memory barrier
+   when a suitably aligned address is available. */
+#ifdef CONFIG_SMP
+#define __ldcw_mb(a) ({						\
+	unsigned __tmp;						\
+	__asm__ __volatile__(__LDCW " 0(%1),%0"			\
+	ALTERNATIVE(ALT_COND_NO_SMP, INSN_NOP)			\
+	: "=r" (__tmp) : "r" (a) : "memory");			\
+})
+#else
+#define __ldcw_mb(a) barrier();
+#endif
+
 #ifdef CONFIG_SMP
 # define __lock_aligned __section(.data..lock_aligned)
 #endif
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 70fecb8dc4e2..dd13753e20de 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -10,8 +10,7 @@
 static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
-	smp_mb();
-	return *a == 0;
+	return READ_ONCE(*a) == 0;
 }

 static inline void arch_spin_lock(arch_spinlock_t *x)
@@ -21,22 +20,21 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
 		while (*a == 0)
-			cpu_relax();
+			continue;
 }

 static inline void arch_spin_lock_flags(arch_spinlock_t *x,
-					 unsigned long flags)
+					  unsigned long flags)
 {
 	volatile unsigned int *a;
-	unsigned long flags_dis;

 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0) {
-		local_save_flags(flags_dis);
-		local_irq_restore(flags);
 		while (*a == 0)
-			cpu_relax();
-		local_irq_restore(flags_dis);
+			if (flags & PSW_SM_I) {
+				local_irq_enable();
+				local_irq_disable();
+			}
 	}
 }
 #define arch_spin_lock_flags arch_spin_lock_flags
@@ -46,23 +44,16 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
 	volatile unsigned int *a;

 	a = __ldcw_align(x);
-#ifdef CONFIG_SMP
-	(void) __ldcw(a);
-#else
-	mb();
-#endif
+	__ldcw_mb(a);
 	*a = 1;
 }

 static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
-	int ret;

 	a = __ldcw_align(x);
-        ret = __ldcw(a) != 0;
-
-	return ret;
+	return __ldcw(a) != 0;
 }

 /*


* Re: [PATCH] parisc: Various spin lock optimizations
  2020-07-19 18:41 [PATCH] parisc: Various spin lock optimizations John David Anglin
@ 2020-07-20 13:18 ` John David Anglin
  0 siblings, 0 replies; 2+ messages in thread
From: John David Anglin @ 2020-07-20 13:18 UTC (permalink / raw)
  To: linux-parisc; +Cc: Helge Deller, James Bottomley

On 2020-07-19 2:41 p.m., John David Anglin wrote:
>  #define arch_spin_lock_flags arch_spin_lock_flags
> @@ -46,23 +44,16 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
>  	volatile unsigned int *a;
>
>  	a = __ldcw_align(x);
> -#ifdef CONFIG_SMP
> -	(void) __ldcw(a);
> -#else
> -	mb();
> -#endif
> +	__ldcw_mb(a);
>  	*a = 1;
>  }
I'm retesting using an ordered store for the release.  So far, it's looking okay.
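
For reference, an ordered-store release might look roughly like this (sketch only; it assumes the zero-offset
"stw,ma" form acts as an ordered store on the lock word, and it's not what I've settled on yet):

	static inline void arch_spin_unlock(arch_spinlock_t *x)
	{
		volatile unsigned int *a = __ldcw_align(x);

		/* Ordered store releases the lock; earlier loads and stores
		   can't be reordered past it. */
		__asm__ __volatile__("stw,ma %0,0(%1)"
				     : : "r" (1), "r" (a) : "memory");
	}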

Dave

-- 
John David Anglin  dave.anglin@bell.net


