* [PATCH] locking: remove spin_lock_flags() etc
@ 2021-10-22 11:59 Arnd Bergmann
  2021-10-22 14:10 ` Helge Deller
                   ` (2 more replies)
  0 siblings, 3 replies; 13+ messages in thread
From: Arnd Bergmann @ 2021-10-22 11:59 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Will Deacon
  Cc: Arnd Bergmann, Waiman Long, Boqun Feng, Jonas Bonn,
	Stefan Kristiansson, Stafford Horne, James E.J. Bottomley,
	Helge Deller, Michael Ellerman, Benjamin Herrenschmidt,
	Paul Mackerras, Heiko Carstens, Vasily Gorbik,
	Christian Borntraeger, Alexander Gordeev, linux-kernel,
	linux-ia64, openrisc, linux-parisc, linuxppc-dev, linux-s390

From: Arnd Bergmann <arnd@arndb.de>

parisc, ia64 and powerpc32 are the only remaining architectures that
provide custom arch_{spin,read,write}_lock_flags() functions, which are
meant to re-enable interrupts while waiting for a spinlock.
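
The idea, in a minimal sketch (not the exact code of any one
architecture; the parisc version removed below is the closest real
instance, and irqs_enabled_in() is a made-up stand-in for the
arch-specific flags test, e.g. parisc's PSW_SM_I check):

	static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						unsigned long flags)
	{
		while (!arch_spin_trylock(lock)) {
			/* caller had irqs on: give pending irqs a chance */
			if (irqs_enabled_in(flags)) {	/* hypothetical */
				local_irq_enable();
				local_irq_disable();
			}
			cpu_relax();
		}
	}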

However, none of these can actually run into this codepath, because
it is only called on architectures without CONFIG_GENERIC_LOCKBREAK,
or when CONFIG_DEBUG_LOCK_ALLOC is set without CONFIG_LOCKDEP, and none
of those combinations are possible on the three architectures.
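
One of the guards in question sits in include/linux/spinlock_api_smp.h;
before this patch it reads roughly like this (comment elided):

	#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
	static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
	{
		unsigned long flags;

		local_irq_save(flags);
		preempt_disable();
		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	#ifdef CONFIG_LOCKDEP
		LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
	#else
		/* only reachable with the outer #if true and LOCKDEP off */
		do_raw_spin_lock_flags(lock, &flags);
	#endif
		return flags;
	}
	#endif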

Going back in the git history, it appears that arch/mn10300 may have
been able to run into this code path, but there is a good chance that
it never worked. On the architectures that still exist, it was
already impossible to hit back in 2008 after the introduction of
CONFIG_GENERIC_LOCKBREAK, and possibly earlier.

As this is all dead code, just remove it and the helper functions built
around it. For arch/ia64, the inline asm could be cleaned up, but
it seems safer to leave it untouched.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
---
 arch/ia64/include/asm/spinlock.h           | 23 ++++++----------------
 arch/openrisc/include/asm/spinlock.h       |  3 ---
 arch/parisc/include/asm/spinlock.h         | 15 --------------
 arch/powerpc/include/asm/simple_spinlock.h | 21 --------------------
 arch/s390/include/asm/spinlock.h           |  8 --------
 include/linux/lockdep.h                    | 17 ----------------
 include/linux/rwlock.h                     | 15 --------------
 include/linux/rwlock_api_smp.h             |  6 ++----
 include/linux/spinlock.h                   | 13 ------------
 include/linux/spinlock_api_smp.h           |  9 ---------
 include/linux/spinlock_up.h                |  1 -
 kernel/locking/spinlock.c                  |  3 +--
 12 files changed, 9 insertions(+), 125 deletions(-)

diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 864775970c50..0e5c1ad3239c 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -124,18 +124,13 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
-						  unsigned long flags)
-{
-	arch_spin_lock(lock);
-}
-#define arch_spin_lock_flags	arch_spin_lock_flags
-
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_read_lock(arch_rwlock_t *lock)
 {
+	unsigned long flags = 0;
+
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1,%2\n"
 		"br.few 3f\n"
@@ -157,13 +152,8 @@ arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 		: "p6", "p7", "r2", "memory");
 }
 
-#define arch_read_lock_flags arch_read_lock_flags
-#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
-
 #else /* !ASM_SUPPORTED */
 
-#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-
 #define arch_read_lock(rw)								\
 do {											\
 	arch_rwlock_t *__read_lock_ptr = (rw);						\
@@ -186,8 +176,10 @@ do {								\
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_write_lock(arch_rwlock_t *lock)
 {
+	unsigned long flags = 0;
+
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1, %2\n"
 		"mov ar.ccv = r0\n"
@@ -210,9 +202,6 @@ arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
-#define arch_write_lock_flags arch_write_lock_flags
-#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
-
 #define arch_write_trylock(rw)							\
 ({										\
 	register long result;							\
diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h
index a8940bdfcb7e..264944a71535 100644
--- a/arch/openrisc/include/asm/spinlock.h
+++ b/arch/openrisc/include/asm/spinlock.h
@@ -19,9 +19,6 @@
 
 #include <asm/qrwlock.h>
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #define arch_spin_relax(lock)	cpu_relax()
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index fa5ee8a45dbd..a6e5d66a7656 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -23,21 +23,6 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
 			continue;
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *x,
-					unsigned long flags)
-{
-	volatile unsigned int *a;
-
-	a = __ldcw_align(x);
-	while (__ldcw(a) == 0)
-		while (*a == 0)
-			if (flags & PSW_SM_I) {
-				local_irq_enable();
-				local_irq_disable();
-			}
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 8985791a2ba5..7ae6aeef8464 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -123,27 +123,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	}
 }
 
-static inline
-void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-	unsigned long flags_dis;
-
-	while (1) {
-		if (likely(__arch_spin_trylock(lock) == 0))
-			break;
-		local_save_flags(flags_dis);
-		local_irq_restore(flags);
-		do {
-			HMT_low();
-			if (is_shared_processor())
-				splpar_spin_yield(lock);
-		} while (unlikely(lock->slock != 0));
-		HMT_medium();
-		local_irq_restore(flags_dis);
-	}
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__("# arch_spin_unlock\n\t"
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index ef59588a3042..888a2f1c9ee3 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -67,14 +67,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lp)
 		arch_spin_lock_wait(lp);
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
-					unsigned long flags)
-{
-	if (!arch_spin_trylock_once(lp))
-		arch_spin_lock_wait(lp);
-}
-#define arch_spin_lock_flags	arch_spin_lock_flags
-
 static inline int arch_spin_trylock(arch_spinlock_t *lp)
 {
 	if (!arch_spin_trylock_once(lp))
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 9fe165beb0f9..467b94257105 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -481,23 +481,6 @@ do {								\
 
 #endif /* CONFIG_LOCK_STAT */
 
-#ifdef CONFIG_LOCKDEP
-
-/*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_*_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
-	LOCK_CONTENDED((_lock), (try), (lock))
-
-#else /* CONFIG_LOCKDEP */
-
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
-	lockfl((_lock), (flags))
-
-#endif /* CONFIG_LOCKDEP */
-
 #ifdef CONFIG_PROVE_LOCKING
 extern void print_irqtrace_events(struct task_struct *curr);
 #else
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 7ce9a51ae5c0..2c0ad417ce3c 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -30,31 +30,16 @@ do {								\
 
 #ifdef CONFIG_DEBUG_SPINLOCK
  extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
  extern int do_raw_read_trylock(rwlock_t *lock);
  extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
  extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
  extern int do_raw_write_trylock(rwlock_t *lock);
  extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
 #else
-
-#ifndef arch_read_lock_flags
-# define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
-#endif
-
-#ifndef arch_write_lock_flags
-# define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
-#endif
-
 # define do_raw_read_lock(rwlock)	do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_read_lock_flags(lock, flags) \
-		do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_read_trylock(rwlock)	arch_read_trylock(&(rwlock)->raw_lock)
 # define do_raw_read_unlock(rwlock)	do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 # define do_raw_write_lock(rwlock)	do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_write_lock_flags(lock, flags) \
-		do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_write_trylock(rwlock)	arch_write_trylock(&(rwlock)->raw_lock)
 # define do_raw_write_unlock(rwlock)	do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 #endif
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index abfb53ab11be..f1db6f17c4fb 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -157,8 +157,7 @@ static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
 	local_irq_save(flags);
 	preempt_disable();
 	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
-			     do_raw_read_lock_flags, &flags);
+	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
 	return flags;
 }
 
@@ -184,8 +183,7 @@ static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
 	local_irq_save(flags);
 	preempt_disable();
 	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
-			     do_raw_write_lock_flags, &flags);
+	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
 	return flags;
 }
 
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index c04e99edfe92..40e467cdee2d 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -176,7 +176,6 @@ do {									\
 
 #ifdef CONFIG_DEBUG_SPINLOCK
  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
-#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
 #else
@@ -187,18 +186,6 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
 	mmiowb_spin_lock();
 }
 
-#ifndef arch_spin_lock_flags
-#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
-#endif
-
-static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
-{
-	__acquire(lock);
-	arch_spin_lock_flags(&lock->raw_lock, *flags);
-	mmiowb_spin_lock();
-}
-
 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int ret = arch_spin_trylock(&(lock)->raw_lock);
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 6b8e1a0b137b..51fa0dab68c4 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -108,16 +108,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
 	local_irq_save(flags);
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	/*
-	 * On lockdep we dont want the hand-coded irq-enable of
-	 * do_raw_spin_lock_flags() code, because lockdep assumes
-	 * that interrupts are not re-enabled during lock-acquire:
-	 */
-#ifdef CONFIG_LOCKDEP
 	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
-#else
-	do_raw_spin_lock_flags(lock, &flags);
-#endif
 	return flags;
 }
 
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 0ac9112c1bbe..16521074b6f7 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -62,7 +62,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 #define arch_spin_is_locked(lock)	((void)(lock), 0)
 /* for sched/core.c and kernel_lock.c: */
 # define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
 # define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
 # define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index c5830cfa379a..b562f9289372 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -378,8 +378,7 @@ unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
 	local_irq_save(flags);
 	preempt_disable();
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
-				do_raw_spin_lock_flags, &flags);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 	return flags;
 }
 EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);
-- 
2.29.2



* Re: [PATCH] locking: remove spin_lock_flags() etc
  2021-10-22 11:59 [PATCH] locking: remove spin_lock_flags() etc Arnd Bergmann
@ 2021-10-22 14:10 ` Helge Deller
  2021-10-23  1:37 ` Waiman Long
  2021-10-30 14:47 ` [tip: locking/core] locking: Remove " tip-bot2 for Arnd Bergmann
  2 siblings, 0 replies; 13+ messages in thread
From: Helge Deller @ 2021-10-22 14:10 UTC (permalink / raw)
  To: Arnd Bergmann, Peter Zijlstra, Ingo Molnar, Will Deacon
  Cc: Arnd Bergmann, Waiman Long, Boqun Feng, Jonas Bonn,
	Stefan Kristiansson, Stafford Horne, James E.J. Bottomley,
	Michael Ellerman, Benjamin Herrenschmidt, Paul Mackerras,
	Heiko Carstens, Vasily Gorbik, Christian Borntraeger,
	Alexander Gordeev, linux-kernel, linux-ia64, openrisc,
	linux-parisc, linuxppc-dev, linux-s390

On 10/22/21 13:59, Arnd Bergmann wrote:
> From: Arnd Bergmann <arnd@arndb.de>
>
> parisc, ia64 and powerpc32 are the only remaining architectures that
> provide custom arch_{spin,read,write}_lock_flags() functions, which are
> meant to re-enable interrupts while waiting for a spinlock.
>
> However, none of these can actually run into this codepath, because
> it is only called on architectures without CONFIG_GENERIC_LOCKBREAK,
> or when CONFIG_DEBUG_LOCK_ALLOC is set without CONFIG_LOCKDEP, and none
> of those combinations are possible on the three architectures.
>
> Going back in the git history, it appears that arch/mn10300 may have
> been able to run into this code path, but there is a good chance that
> it never worked. On the architectures that still exist, it was
> already impossible to hit back in 2008 after the introduction of
> CONFIG_GENERIC_LOCKBREAK, and possibly earlier.
>
> As this is all dead code, just remove it and the helper functions built
> around it. For arch/ia64, the inline asm could be cleaned up, but
> it seems safer to leave it untouched.
>
> Signed-off-by: Arnd Bergmann <arnd@arndb.de>

Acked-by: Helge Deller <deller@gmx.de>  # parisc

Helge


* Re: [PATCH] locking: remove spin_lock_flags() etc
  2021-10-22 11:59 [PATCH] locking: remove spin_lock_flags() etc Arnd Bergmann
  2021-10-22 14:10 ` Helge Deller
@ 2021-10-23  1:37 ` Waiman Long
  2021-10-23 16:04   ` Arnd Bergmann
  2021-10-30 14:47 ` [tip: locking/core] locking: Remove " tip-bot2 for Arnd Bergmann
  2 siblings, 1 reply; 13+ messages in thread
From: Waiman Long @ 2021-10-23  1:37 UTC (permalink / raw)
  To: Arnd Bergmann, Peter Zijlstra, Ingo Molnar, Will Deacon
  Cc: Arnd Bergmann, Boqun Feng, Jonas Bonn, Stefan Kristiansson,
	Stafford Horne, James E.J. Bottomley, Helge Deller,
	Michael Ellerman, Benjamin Herrenschmidt, Paul Mackerras,
	Heiko Carstens, Vasily Gorbik, Christian Borntraeger,
	Alexander Gordeev, linux-kernel, linux-ia64, openrisc,
	linux-parisc, linuxppc-dev, linux-s390

On 10/22/21 7:59 AM, Arnd Bergmann wrote:
> From: Arnd Bergmann <arnd@arndb.de>
>
> parisc, ia64 and powerpc32 are the only remaining architectures that
> provide custom arch_{spin,read,write}_lock_flags() functions, which are
> meant to re-enable interrupts while waiting for a spinlock.
>
> However, none of these can actually run into this codepath, because
> it is only called on architectures without CONFIG_GENERIC_LOCKBREAK,
> or when CONFIG_DEBUG_LOCK_ALLOC is set without CONFIG_LOCKDEP, and none
> of those combinations are possible on the three architectures.
>
> Going back in the git history, it appears that arch/mn10300 may have
> been able to run into this code path, but there is a good chance that
> it never worked. On the architectures that still exist, it was
> already impossible to hit back in 2008 after the introduction of
> CONFIG_GENERIC_LOCKBREAK, and possibly earlier.
>
> As this is all dead code, just remove it and the helper functions built
> around it. For arch/ia64, the inline asm could be cleaned up, but
> it seems safer to leave it untouched.
>
> Signed-off-by: Arnd Bergmann <arnd@arndb.de>

Does that mean we can also remove the GENERIC_LOCKBREAK config option 
from the Kconfig files as well?

Cheers,
Longman




* Re: [PATCH] locking: remove spin_lock_flags() etc
  2021-10-23  1:37 ` Waiman Long
@ 2021-10-23 16:04   ` Arnd Bergmann
  2021-10-25  9:57     ` Peter Zijlstra
  0 siblings, 1 reply; 13+ messages in thread
From: Arnd Bergmann @ 2021-10-23 16:04 UTC (permalink / raw)
  To: Waiman Long
  Cc: Peter Zijlstra, Ingo Molnar, Will Deacon, Arnd Bergmann,
	Boqun Feng, Jonas Bonn, Stefan Kristiansson, Stafford Horne,
	James E.J. Bottomley, Helge Deller, Michael Ellerman,
	Benjamin Herrenschmidt, Paul Mackerras, Heiko Carstens,
	Vasily Gorbik, Christian Borntraeger, Alexander Gordeev,
	Linux Kernel Mailing List, linux-ia64, Openrisc, Parisc List,
	linuxppc-dev, linux-s390

On Sat, Oct 23, 2021 at 3:37 AM Waiman Long <longman@redhat.com> wrote:
>> On 10/22/21 7:59 AM, Arnd Bergmann wrote:
> > From: Arnd Bergmann <arnd@arndb.de>
> >
> > As this is all dead code, just remove it and the helper functions built
> > around it. For arch/ia64, the inline asm could be cleaned up, but
> > it seems safer to leave it untouched.
> >
> > Signed-off-by: Arnd Bergmann <arnd@arndb.de>
>
> Does that mean we can also remove the GENERIC_LOCKBREAK config option
> from the Kconfig files as well?

 I couldn't figure this out.

What I see is that the only architectures setting GENERIC_LOCKBREAK are
nds32, parisc, powerpc, s390, sh and sparc64, while the only architectures
implementing arch_spin_is_contended() are arm32, csky and ia64.

The part I don't understand is whether the option actually does anything
useful any more after commit d89c70356acf ("locking/core: Remove break_lock
field when CONFIG_GENERIC_LOCKBREAK=y").

      Arnd


* Re: [PATCH] locking: remove spin_lock_flags() etc
  2021-10-23 16:04   ` Arnd Bergmann
@ 2021-10-25  9:57     ` Peter Zijlstra
  2021-10-25 10:06       ` Peter Zijlstra
  2021-10-25 13:06       ` Arnd Bergmann
  0 siblings, 2 replies; 13+ messages in thread
From: Peter Zijlstra @ 2021-10-25  9:57 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: Waiman Long, Ingo Molnar, Will Deacon, Arnd Bergmann, Boqun Feng,
	Jonas Bonn, Stefan Kristiansson, Stafford Horne,
	James E.J. Bottomley, Helge Deller, Michael Ellerman,
	Benjamin Herrenschmidt, Paul Mackerras, Heiko Carstens,
	Vasily Gorbik, Christian Borntraeger, Alexander Gordeev,
	Linux Kernel Mailing List, linux-ia64, Openrisc, Parisc List,
	linuxppc-dev, linux-s390

On Sat, Oct 23, 2021 at 06:04:57PM +0200, Arnd Bergmann wrote:
> On Sat, Oct 23, 2021 at 3:37 AM Waiman Long <longman@redhat.com> wrote:
> >> On 10/22/21 7:59 AM, Arnd Bergmann wrote:
> > > From: Arnd Bergmann <arnd@arndb.de>
> > >
> > > As this is all dead code, just remove it and the helper functions built
> > > around it. For arch/ia64, the inline asm could be cleaned up, but
> > > it seems safer to leave it untouched.
> > >
> > > Signed-off-by: Arnd Bergmann <arnd@arndb.de>
> >
> > Does that mean we can also remove the GENERIC_LOCKBREAK config option
> > from the Kconfig files as well?
> 
>  I couldn't figure this out.
> 
> What I see is that the only architectures setting GENERIC_LOCKBREAK are
> nds32, parisc, powerpc, s390, sh and sparc64, while the only architectures
> implementing arch_spin_is_contended() are arm32, csky and ia64.
> 
> The part I don't understand is whether the option actually does anything
> useful any more after commit d89c70356acf ("locking/core: Remove break_lock
> field when CONFIG_GENERIC_LOCKBREAK=y").

Urgh, what a mess.. AFAICT there's still code in
kernel/locking/spinlock.c that relies on it. Specifically when
GENERIC_LOCKBREAK=y we seem to create _lock*() variants that are
basically TaS locks which drop preempt/irq disable while spinning.

Anybody having this on and not having native TaS locks is in for a rude
surprise I suppose... sparc64 being the obvious candidate there :/
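
Hand-expanding BUILD_LOCK_OPS() from kernel/locking/spinlock.c for the
raw_spinlock irqsave case, the generated variant is roughly:

	unsigned long __lockfunc __raw_spin_lock_irqsave(raw_spinlock_t *lock)
	{
		unsigned long flags;

		for (;;) {
			preempt_disable();
			local_irq_save(flags);
			if (likely(do_raw_spin_trylock(lock)))
				break;
			/* drop the protection again while spinning */
			local_irq_restore(flags);
			preempt_enable();

			arch_spin_relax(&lock->raw_lock);
		}

		return flags;
	}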





* Re: [PATCH] locking: remove spin_lock_flags() etc
  2021-10-25  9:57     ` Peter Zijlstra
@ 2021-10-25 10:06       ` Peter Zijlstra
  2021-10-25 13:06       ` Arnd Bergmann
  1 sibling, 0 replies; 13+ messages in thread
From: Peter Zijlstra @ 2021-10-25 10:06 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: Waiman Long, Ingo Molnar, Will Deacon, Arnd Bergmann, Boqun Feng,
	Jonas Bonn, Stefan Kristiansson, Stafford Horne,
	James E.J. Bottomley, Helge Deller, Michael Ellerman,
	Benjamin Herrenschmidt, Paul Mackerras, Heiko Carstens,
	Vasily Gorbik, Christian Borntraeger, Alexander Gordeev,
	Linux Kernel Mailing List, linux-ia64, Openrisc, Parisc List,
	linuxppc-dev, linux-s390

On Mon, Oct 25, 2021 at 11:57:28AM +0200, Peter Zijlstra wrote:
> On Sat, Oct 23, 2021 at 06:04:57PM +0200, Arnd Bergmann wrote:
> > On Sat, Oct 23, 2021 at 3:37 AM Waiman Long <longman@redhat.com> wrote:
> > >> On 10/22/21 7:59 AM, Arnd Bergmann wrote:
> > > > From: Arnd Bergmann <arnd@arndb.de>
> > > >
> > > > As this is all dead code, just remove it and the helper functions built
> > > > around it. For arch/ia64, the inline asm could be cleaned up, but
> > > > it seems safer to leave it untouched.
> > > >
> > > > Signed-off-by: Arnd Bergmann <arnd@arndb.de>
> > >
> > > Does that mean we can also remove the GENERIC_LOCKBREAK config option
> > > from the Kconfig files as well?
> > 
> >  I couldn't figure this out.
> > 
> > What I see is that the only architectures setting GENERIC_LOCKBREAK are
> > nds32, parisc, powerpc, s390, sh and sparc64, while the only architectures
> > implementing arch_spin_is_contended() are arm32, csky and ia64.
> > 
> > The part I don't understand is whether the option actually does anything
> > useful any more after commit d89c70356acf ("locking/core: Remove break_lock
> > field when CONFIG_GENERIC_LOCKBREAK=y").
> 
> Urgh, what a mess.. AFAICT there's still code in
> kernel/locking/spinlock.c that relies on it. Specifically when
> GENERIC_LOCKBREAK=y we seem to create _lock*() variants that are
> basically TaS locks which drop preempt/irq disable while spinning.
> 
> Anybody having this on and not having native TaS locks is in for a rude
> surprise I suppose... sparc64 being the obvious candidate there :/

Something like the *totally*untested* patch below would rip it all out.

---
 arch/ia64/Kconfig                |  3 --
 arch/nds32/Kconfig               |  4 --
 arch/parisc/Kconfig              |  5 ---
 arch/powerpc/Kconfig             |  5 ---
 arch/s390/Kconfig                |  3 --
 arch/sh/Kconfig                  |  4 --
 arch/sparc/Kconfig               |  6 ---
 include/linux/rwlock_api_smp.h   |  4 +-
 include/linux/spinlock_api_smp.h |  4 +-
 kernel/Kconfig.locks             | 26 ++++++------
 kernel/locking/spinlock.c        | 90 ----------------------------------------
 11 files changed, 17 insertions(+), 137 deletions(-)

diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1e33666fa679..5ec3abba3c81 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -81,9 +81,6 @@ config MMU
 config STACKTRACE_SUPPORT
 	def_bool y
 
-config GENERIC_LOCKBREAK
-	def_bool n
-
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index aea26e739543..699008dbd6c2 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -59,10 +59,6 @@ config GENERIC_CSUM
 config GENERIC_HWEIGHT
 	def_bool y
 
-config GENERIC_LOCKBREAK
-	def_bool y
-	depends on PREEMPTION
-
 config STACKTRACE_SUPPORT
 	def_bool y
 
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 27a8b49af11f..afe70bcdde2c 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -86,11 +86,6 @@ config ARCH_DEFCONFIG
 	default "arch/parisc/configs/generic-32bit_defconfig" if !64BIT
 	default "arch/parisc/configs/generic-64bit_defconfig" if 64BIT
 
-config GENERIC_LOCKBREAK
-	bool
-	default y
-	depends on SMP && PREEMPTION
-
 config ARCH_HAS_ILOG2_U32
 	bool
 	default n
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ba5b66189358..e782c9ea3f81 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -98,11 +98,6 @@ config LOCKDEP_SUPPORT
 	bool
 	default y
 
-config GENERIC_LOCKBREAK
-	bool
-	default y
-	depends on SMP && PREEMPTION
-
 config GENERIC_HWEIGHT
 	bool
 	default y
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b86de61b8caa..e4ff05f5393b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -26,9 +26,6 @@ config GENERIC_BUG
 config GENERIC_BUG_RELATIVE_POINTERS
 	def_bool y
 
-config GENERIC_LOCKBREAK
-	def_bool y if PREEMPTION
-
 config PGSTE
 	def_bool y if KVM
 
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 6904f4bdbf00..26f1cf2c69a3 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -86,10 +86,6 @@ config GENERIC_HWEIGHT
 config GENERIC_CALIBRATE_DELAY
 	bool
 
-config GENERIC_LOCKBREAK
-	def_bool y
-	depends on SMP && PREEMPTION
-
 config ARCH_SUSPEND_POSSIBLE
 	def_bool n
 
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index b120ed947f50..e77e7254eaa0 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -246,12 +246,6 @@ config US3_MC
 
 	  If in doubt, say Y, as this information can be very useful.
 
-# Global things across all Sun machines.
-config GENERIC_LOCKBREAK
-	bool
-	default y
-	depends on SPARC64 && SMP && PREEMPTION
-
 config NUMA
 	bool "NUMA support"
 	depends on SPARC64 && SMP
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index abfb53ab11be..a281d81ef8ee 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -141,7 +141,7 @@ static inline int __raw_write_trylock(rwlock_t *lock)
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if defined(CONFIG_DEBUG_LOCK_ALLOC)
 
 static inline void __raw_read_lock(rwlock_t *lock)
 {
@@ -211,7 +211,7 @@ static inline void __raw_write_lock(rwlock_t *lock)
 	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
 }
 
-#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
 
 static inline void __raw_write_unlock(rwlock_t *lock)
 {
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 6b8e1a0b137b..fb437243eb2e 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -99,7 +99,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
  * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if defined(CONFIG_DEBUG_LOCK_ALLOC)
 
 static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
 {
@@ -143,7 +143,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 
-#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 4198f0273ecd..8e0b501189e8 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -93,7 +93,7 @@ config UNINLINE_SPIN_UNLOCK
 
 #
 # lock_* functions are inlined when:
-#   - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y
+#   - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
 #
 # trylock_* functions are inlined when:
 #   - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
@@ -119,19 +119,19 @@ config INLINE_SPIN_TRYLOCK_BH
 
 config INLINE_SPIN_LOCK
 	def_bool y
-	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK
+	depends on ARCH_INLINE_SPIN_LOCK
 
 config INLINE_SPIN_LOCK_BH
 	def_bool y
-	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_BH
+	depends on ARCH_INLINE_SPIN_LOCK_BH
 
 config INLINE_SPIN_LOCK_IRQ
 	def_bool y
-	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_IRQ
+	depends on ARCH_INLINE_SPIN_LOCK_IRQ
 
 config INLINE_SPIN_LOCK_IRQSAVE
 	def_bool y
-	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_IRQSAVE
+	depends on ARCH_INLINE_SPIN_LOCK_IRQSAVE
 
 config INLINE_SPIN_UNLOCK_BH
 	def_bool y
@@ -152,19 +152,19 @@ config INLINE_READ_TRYLOCK
 
 config INLINE_READ_LOCK
 	def_bool y
-	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK
+	depends on ARCH_INLINE_READ_LOCK
 
 config INLINE_READ_LOCK_BH
 	def_bool y
-	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_BH
+	depends on ARCH_INLINE_READ_LOCK_BH
 
 config INLINE_READ_LOCK_IRQ
 	def_bool y
-	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_IRQ
+	depends on ARCH_INLINE_READ_LOCK_IRQ
 
 config INLINE_READ_LOCK_IRQSAVE
 	def_bool y
-	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_IRQSAVE
+	depends on ARCH_INLINE_READ_LOCK_IRQSAVE
 
 config INLINE_READ_UNLOCK
 	def_bool y
@@ -189,19 +189,19 @@ config INLINE_WRITE_TRYLOCK
 
 config INLINE_WRITE_LOCK
 	def_bool y
-	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK
+	depends on ARCH_INLINE_WRITE_LOCK
 
 config INLINE_WRITE_LOCK_BH
 	def_bool y
-	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_BH
+	depends on ARCH_INLINE_WRITE_LOCK_BH
 
 config INLINE_WRITE_LOCK_IRQ
 	def_bool y
-	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_IRQ
+	depends on ARCH_INLINE_WRITE_LOCK_IRQ
 
 config INLINE_WRITE_LOCK_IRQSAVE
 	def_bool y
-	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_IRQSAVE
+	depends on ARCH_INLINE_WRITE_LOCK_IRQSAVE
 
 config INLINE_WRITE_UNLOCK
 	def_bool y
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index c5830cfa379a..d94ee95585fc 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -29,19 +29,6 @@ EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
 #endif
 #endif
 
-/*
- * If lockdep is enabled then we use the non-preemption spin-ops
- * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
- * not re-enabled during lock-acquire (which the preempt-spin-ops do):
- */
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-/*
- * The __lock_function inlines are taken from
- * spinlock : include/linux/spinlock_api_smp.h
- * rwlock   : include/linux/rwlock_api_smp.h
- */
-#else
-
 /*
  * Some architectures can relax in favour of the CPU owning the lock.
  */
@@ -55,83 +42,6 @@ EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
 # define arch_spin_relax(l)	cpu_relax()
 #endif
 
-/*
- * We build the __lock_function inlines here. They are too large for
- * inlining all over the place, but here is only one user per function
- * which embeds them into the calling _lock_function below.
- *
- * This could be a long-held lock. We both prepare to spin for a long
- * time (making _this_ CPU preemptible if possible), and we also signal
- * towards that other CPU that it should break the lock ASAP.
- */
-#define BUILD_LOCK_OPS(op, locktype)					\
-void __lockfunc __raw_##op##_lock(locktype##_t *lock)			\
-{									\
-	for (;;) {							\
-		preempt_disable();					\
-		if (likely(do_raw_##op##_trylock(lock)))		\
-			break;						\
-		preempt_enable();					\
-									\
-		arch_##op##_relax(&lock->raw_lock);			\
-	}								\
-}									\
-									\
-unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
-{									\
-	unsigned long flags;						\
-									\
-	for (;;) {							\
-		preempt_disable();					\
-		local_irq_save(flags);					\
-		if (likely(do_raw_##op##_trylock(lock)))		\
-			break;						\
-		local_irq_restore(flags);				\
-		preempt_enable();					\
-									\
-		arch_##op##_relax(&lock->raw_lock);			\
-	}								\
-									\
-	return flags;							\
-}									\
-									\
-void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)		\
-{									\
-	_raw_##op##_lock_irqsave(lock);					\
-}									\
-									\
-void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
-{									\
-	unsigned long flags;						\
-									\
-	/*							*/	\
-	/* Careful: we must exclude softirqs too, hence the	*/	\
-	/* irq-disabling. We use the generic preemption-aware	*/	\
-	/* function:						*/	\
-	/**/								\
-	flags = _raw_##op##_lock_irqsave(lock);				\
-	local_bh_disable();						\
-	local_irq_restore(flags);					\
-}									\
-
-/*
- * Build preemption-friendly versions of the following
- * lock-spinning functions:
- *
- *         __[spin|read|write]_lock()
- *         __[spin|read|write]_lock_irq()
- *         __[spin|read|write]_lock_irqsave()
- *         __[spin|read|write]_lock_bh()
- */
-BUILD_LOCK_OPS(spin, raw_spinlock);
-
-#ifndef CONFIG_PREEMPT_RT
-BUILD_LOCK_OPS(read, rwlock);
-BUILD_LOCK_OPS(write, rwlock);
-#endif
-
-#endif
-
 #ifndef CONFIG_INLINE_SPIN_TRYLOCK
 int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
 {


* Re: [PATCH] locking: remove spin_lock_flags() etc
  2021-10-25  9:57     ` Peter Zijlstra
  2021-10-25 10:06       ` Peter Zijlstra
@ 2021-10-25 13:06       ` Arnd Bergmann
  2021-10-25 14:33         ` Peter Zijlstra
  2021-10-25 15:28         ` Waiman Long
  1 sibling, 2 replies; 13+ messages in thread
From: Arnd Bergmann @ 2021-10-25 13:06 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: Waiman Long, Ingo Molnar, Will Deacon, Arnd Bergmann, Boqun Feng,
	Jonas Bonn, Stefan Kristiansson, Stafford Horne,
	James E.J. Bottomley, Helge Deller, Michael Ellerman,
	Benjamin Herrenschmidt, Paul Mackerras, Heiko Carstens,
	Vasily Gorbik, Christian Borntraeger, Alexander Gordeev,
	Linux Kernel Mailing List, linux-ia64, Openrisc, Parisc List,
	linuxppc-dev, linux-s390

On Mon, Oct 25, 2021 at 11:57 AM Peter Zijlstra <peterz@infradead.org> wrote:
> On Sat, Oct 23, 2021 at 06:04:57PM +0200, Arnd Bergmann wrote:
> > On Sat, Oct 23, 2021 at 3:37 AM Waiman Long <longman@redhat.com> wrote:
> > >> On 10/22/21 7:59 AM, Arnd Bergmann wrote:
> > > > From: Arnd Bergmann <arnd@arndb.de>
> > > >
> > > > As this is all dead code, just remove it and the helper functions built
> > > > around it. For arch/ia64, the inline asm could be cleaned up, but
> > > > it seems safer to leave it untouched.
> > > >
> > > > Signed-off-by: Arnd Bergmann <arnd@arndb.de>
> > >
> > > Does that mean we can also remove the GENERIC_LOCKBREAK config option
> > > from the Kconfig files as well?
> >
> >  I couldn't figure this out.
> >
> > What I see is that the only architectures setting GENERIC_LOCKBREAK are
> > nds32, parisc, powerpc, s390, sh and sparc64, while the only architectures
> > implementing arch_spin_is_contended() are arm32, csky and ia64.
> >
> > The part I don't understand is whether the option actually does anything
> > useful any more after commit d89c70356acf ("locking/core: Remove break_lock
> > field when CONFIG_GENERIC_LOCKBREAK=y").
>
> Urgh, what a mess.. AFAICT there's still code in
> kernel/locking/spinlock.c that relies on it. Specifically when
> GENERIC_LOCKBREAK=y we seem to create _lock*() variants that are
> basically TaS locks which drop preempt/irq disable while spinning.
>
> Anybody having this on and not having native TaS locks is in for a rude
> surprise I suppose... sparc64 being the obvious candidate there :/

Is this a problem on s390 and powerpc, those two being the ones
that matter in practice?

On s390, we pick between the cmpxchg() based directed-yield when
running on virtualized CPUs, and a normal qspinlock when running on a
dedicated CPU.

On PowerPC, we pick at compile-time between either the qspinlock
(default-enabled on Book3S-64, i.e. all server chips) or a ll/sc based
spinlock plus vm_yield() (default on embedded and 32-bit mac).
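
The powerpc pick is just a compile-time header switch, roughly (from
arch/powerpc/include/asm/spinlock.h, quoted from memory):

	#ifdef CONFIG_PPC_QUEUED_SPINLOCKS
	#include <asm/qspinlock.h>
	#else
	#include <asm/simple_spinlock.h>
	#endif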

       Arnd


* Re: [PATCH] locking: remove spin_lock_flags() etc
  2021-10-25 13:06       ` Arnd Bergmann
@ 2021-10-25 14:33         ` Peter Zijlstra
  2021-10-27 12:01           ` Michael Ellerman
  2021-10-25 15:28         ` Waiman Long
  1 sibling, 1 reply; 13+ messages in thread
From: Peter Zijlstra @ 2021-10-25 14:33 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: Waiman Long, Ingo Molnar, Will Deacon, Arnd Bergmann, Boqun Feng,
	Jonas Bonn, Stefan Kristiansson, Stafford Horne,
	James E.J. Bottomley, Helge Deller, Michael Ellerman,
	Benjamin Herrenschmidt, Paul Mackerras, Heiko Carstens,
	Vasily Gorbik, Christian Borntraeger, Alexander Gordeev,
	Linux Kernel Mailing List, linux-ia64, Openrisc, Parisc List,
	linuxppc-dev, linux-s390

On Mon, Oct 25, 2021 at 03:06:24PM +0200, Arnd Bergmann wrote:
> On Mon, Oct 25, 2021 at 11:57 AM Peter Zijlstra <peterz@infradead.org> wrote:
> > On Sat, Oct 23, 2021 at 06:04:57PM +0200, Arnd Bergmann wrote:
> > > On Sat, Oct 23, 2021 at 3:37 AM Waiman Long <longman@redhat.com> wrote:
> > > >> On 10/22/21 7:59 AM, Arnd Bergmann wrote:
> > > > > From: Arnd Bergmann <arnd@arndb.de>
> > > > >
> > > > > As this is all dead code, just remove it and the helper functions built
> > > > > around it. For arch/ia64, the inline asm could be cleaned up, but
> > > > > it seems safer to leave it untouched.
> > > > >
> > > > > Signed-off-by: Arnd Bergmann <arnd@arndb.de>
> > > >
> > > > Does that mean we can also remove the GENERIC_LOCKBREAK config option
> > > > from the Kconfig files as well?
> > >
> > >  I couldn't figure this out.
> > >
> > > What I see is that the only architectures setting GENERIC_LOCKBREAK are
> > > nds32, parisc, powerpc, s390, sh and sparc64, while the only architectures
> > > implementing arch_spin_is_contended() are arm32, csky and ia64.
> > >
> > > The part I don't understand is whether the option actually does anything
> > > useful any more after commit d89c70356acf ("locking/core: Remove break_lock
> > > field when CONFIG_GENERIC_LOCKBREAK=y").
> >
> > Urgh, what a mess.. AFAICT there's still code in
> > kernel/locking/spinlock.c that relies on it. Specifically when
> > GENERIC_LOCKBREAK=y we seem to create _lock*() variants that are
> > basically TaS locks which drop preempt/irq disable while spinning.
> >
> > Anybody having this on and not having native TaS locks is in for a rude
> > surprise I suppose... sparc64 being the obvious candidate there :/
> 
> Is this a problem on s390 and powerpc, those two being the ones
> that matter in practice?
> 
> On s390, we pick between the cmpxchg() based directed-yield when
> running on virtualized CPUs, and a normal qspinlock when running on a
> dedicated CPU.
> 
> On PowerPC, we pick at compile-time between either the qspinlock
> (default-enabled on Book3S-64, i.e. all server chips) or a ll/sc based
> spinlock plus vm_yield() (default on embedded and 32-bit mac).

Urgh, yeah, so this crud undermines the whole point of having a fair
lock: with the slowpath reduced to a trylock loop, any FIFO ordering
among waiters is lost. I'm thinking s390 and Power want to have this
fixed.


* Re: [PATCH] locking: remove spin_lock_flags() etc
  2021-10-25 13:06       ` Arnd Bergmann
  2021-10-25 14:33         ` Peter Zijlstra
@ 2021-10-25 15:28         ` Waiman Long
  2021-10-25 15:44           ` Arnd Bergmann
  1 sibling, 1 reply; 13+ messages in thread
From: Waiman Long @ 2021-10-25 15:28 UTC (permalink / raw)
  To: Arnd Bergmann, Peter Zijlstra
  Cc: Ingo Molnar, Will Deacon, Arnd Bergmann, Boqun Feng, Jonas Bonn,
	Stefan Kristiansson, Stafford Horne, James E.J. Bottomley,
	Helge Deller, Michael Ellerman, Benjamin Herrenschmidt,
	Paul Mackerras, Heiko Carstens, Vasily Gorbik,
	Christian Borntraeger, Alexander Gordeev,
	Linux Kernel Mailing List, linux-ia64, Openrisc, Parisc List,
	linuxppc-dev, linux-s390


On 10/25/21 9:06 AM, Arnd Bergmann wrote:
> On Mon, Oct 25, 2021 at 11:57 AM Peter Zijlstra <peterz@infradead.org> wrote:
>> On Sat, Oct 23, 2021 at 06:04:57PM +0200, Arnd Bergmann wrote:
>>> On Sat, Oct 23, 2021 at 3:37 AM Waiman Long <longman@redhat.com> wrote:
>>>>> On 10/22/21 7:59 AM, Arnd Bergmann wrote:
>>>>> From: Arnd Bergmann <arnd@arndb.de>
>>>>>
>>>>> As this is all dead code, just remove it and the helper functions built
>>>>> around it. For arch/ia64, the inline asm could be cleaned up, but
>>>>> it seems safer to leave it untouched.
>>>>>
>>>>> Signed-off-by: Arnd Bergmann <arnd@arndb.de>
>>>> Does that mean we can also remove the GENERIC_LOCKBREAK config option
>>>> from the Kconfig files as well?
>>>   I couldn't figure this out.
>>>
>>> What I see is that the only architectures setting GENERIC_LOCKBREAK are
>>> nds32, parisc, powerpc, s390, sh and sparc64, while the only architectures
>>> implementing arch_spin_is_contended() are arm32, csky and ia64.
>>>
>>> The part I don't understand is whether the option actually does anything
>>> useful any more after commit d89c70356acf ("locking/core: Remove break_lock
>>> field when CONFIG_GENERIC_LOCKBREAK=y").
>> Urgh, what a mess.. AFAICT there's still code in
>> kernel/locking/spinlock.c that relies on it. Specifically when
>> GENERIC_LOCKBREAK=y we seem to create _lock*() variants that are
>> basically TaS locks which drop preempt/irq disable while spinning.
>>
>> Anybody having this on and not having native TaS locks is in for a rude
>> surprise I suppose... sparc64 being the obvious candidate there :/
> Is this a problem on s390 and powerpc, those two being the ones
> that matter in practice?
>
> On s390, we pick between the cmpxchg() based directed-yield when
> running on virtualized CPUs, and a normal qspinlock when running on a
> dedicated CPU.

I am not aware that s390 is using qspinlocks at all as I don't see 
ARCH_USE_QUEUED_SPINLOCKS being set anywhere under arch/s390. I only see 
that it uses a cmpxchg based spinlock.

Cheers,
Longman





* Re: [PATCH] locking: remove spin_lock_flags() etc
  2021-10-25 15:28         ` Waiman Long
@ 2021-10-25 15:44           ` Arnd Bergmann
  2021-10-25 18:25             ` Waiman Long
  0 siblings, 1 reply; 13+ messages in thread
From: Arnd Bergmann @ 2021-10-25 15:44 UTC (permalink / raw)
  To: Waiman Long
  Cc: Peter Zijlstra, Ingo Molnar, Will Deacon, Arnd Bergmann,
	Boqun Feng, Jonas Bonn, Stefan Kristiansson, Stafford Horne,
	James E.J. Bottomley, Helge Deller, Michael Ellerman,
	Benjamin Herrenschmidt, Paul Mackerras, Heiko Carstens,
	Vasily Gorbik, Christian Borntraeger, Alexander Gordeev,
	Linux Kernel Mailing List, linux-ia64, Openrisc, Parisc List,
	linuxppc-dev, linux-s390

On Mon, Oct 25, 2021 at 5:28 PM Waiman Long <longman@redhat.com> wrote:
> On 10/25/21 9:06 AM, Arnd Bergmann wrote:
> >
> > On s390, we pick between the cmpxchg() based directed-yield when
> > running on virtualized CPUs, and a normal qspinlock when running on a
> > dedicated CPU.
>
> I am not aware that s390 is using qspinlocks at all as I don't see
> ARCH_USE_QUEUED_SPINLOCKS being set anywhere under arch/s390. I only see
> that it uses a cmpxchg based spinlock.

Sorry, I should not have said "normal" here. See arch/s390/lib/spinlock.c
for their custom queued spinlocks as implemented in arch_spin_lock_queued().
I don't know if that code actually does the same thing as the generic qspinlock,
but it seems at least similar.
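
The idea the two share is MCS-style queueing: each waiter spins on a
flag in its own queue node and is handed the lock by its predecessor,
which is what makes such locks fair and cache-friendly. A minimal
sketch of the technique (illustrative only, matching neither the s390
code nor the generic qspinlock):

	/* Minimal MCS queue lock sketch; illustrative only. */
	struct mcs_node {
		struct mcs_node *next;
		int locked;
	};

	static void mcs_lock(struct mcs_node **tail, struct mcs_node *node)
	{
		struct mcs_node *prev;

		node->next = NULL;
		node->locked = 0;
		prev = xchg(tail, node);	/* atomically enqueue */
		if (prev) {
			WRITE_ONCE(prev->next, node);
			while (!READ_ONCE(node->locked))
				cpu_relax();	/* spin on our own node */
		}
	}

	static void mcs_unlock(struct mcs_node **tail, struct mcs_node *node)
	{
		struct mcs_node *next = READ_ONCE(node->next);

		if (!next) {
			/* no successor visible: try to empty the queue */
			if (cmpxchg(tail, node, NULL) == node)
				return;
			/* a successor is mid-enqueue; wait for the link */
			while (!(next = READ_ONCE(node->next)))
				cpu_relax();
		}
		WRITE_ONCE(next->locked, 1);	/* hand the lock over */
	}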

       Arnd

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH] locking: remove spin_lock_flags() etc
  2021-10-25 15:44           ` Arnd Bergmann
@ 2021-10-25 18:25             ` Waiman Long
  0 siblings, 0 replies; 13+ messages in thread
From: Waiman Long @ 2021-10-25 18:25 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: Peter Zijlstra, Ingo Molnar, Will Deacon, Arnd Bergmann,
	Boqun Feng, Jonas Bonn, Stefan Kristiansson, Stafford Horne,
	James E.J. Bottomley, Helge Deller, Michael Ellerman,
	Benjamin Herrenschmidt, Paul Mackerras, Heiko Carstens,
	Vasily Gorbik, Christian Borntraeger, Alexander Gordeev,
	Linux Kernel Mailing List, linux-ia64, Openrisc, Parisc List,
	linuxppc-dev, linux-s390

On 10/25/21 11:44 AM, Arnd Bergmann wrote:
> On Mon, Oct 25, 2021 at 5:28 PM Waiman Long <longman@redhat.com> wrote:
>> On 10/25/21 9:06 AM, Arnd Bergmann wrote:
>>> On s390, we pick between the cmpxchg() based directed-yield when
>>> running on virtualized CPUs, and a normal qspinlock when running on a
>>> dedicated CPU.
>> I am not aware that s390 is using qspinlocks at all as I don't see
>> ARCH_USE_QUEUED_SPINLOCKS being set anywhere under arch/s390. I only see
>> that it uses a cmpxchg based spinlock.
> Sorry, I should not have said "normal" here. See arch/s390/lib/spinlock.c
> for their custom queued spinlocks as implemented in arch_spin_lock_queued().
> I don't know if that code actually does the same thing as the generic qspinlock,
> but it seems at least similar.

Yes, you are right. Their queued lock code looks like a custom version 
of the pvqspinlock code.

Cheers,
Longman

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH] locking: remove spin_lock_flags() etc
  2021-10-25 14:33         ` Peter Zijlstra
@ 2021-10-27 12:01           ` Michael Ellerman
  0 siblings, 0 replies; 13+ messages in thread
From: Michael Ellerman @ 2021-10-27 12:01 UTC (permalink / raw)
  To: Peter Zijlstra, Arnd Bergmann
  Cc: Waiman Long, Ingo Molnar, Will Deacon, Arnd Bergmann, Boqun Feng,
	Jonas Bonn, Stefan Kristiansson, Stafford Horne,
	James E.J. Bottomley, Helge Deller, Benjamin Herrenschmidt,
	Paul Mackerras, Heiko Carstens, Vasily Gorbik,
	Christian Borntraeger, Alexander Gordeev,
	Linux Kernel Mailing List, linux-ia64, Openrisc, Parisc List,
	linuxppc-dev, linux-s390

Peter Zijlstra <peterz@infradead.org> writes:
> On Mon, Oct 25, 2021 at 03:06:24PM +0200, Arnd Bergmann wrote:
>> On Mon, Oct 25, 2021 at 11:57 AM Peter Zijlstra <peterz@infradead.org> wrote:
>> > On Sat, Oct 23, 2021 at 06:04:57PM +0200, Arnd Bergmann wrote:
>> > > On Sat, Oct 23, 2021 at 3:37 AM Waiman Long <longman@redhat.com> wrote:
>> > > >> On 10/22/21 7:59 AM, Arnd Bergmann wrote:
>> > > > > From: Arnd Bergmann <arnd@arndb.de>
>> > > > >
>> > > > > As this is all dead code, just remove it and the helper functions built
>> > > > > around it. For arch/ia64, the inline asm could be cleaned up, but
>> > > > > it seems safer to leave it untouched.
>> > > > >
>> > > > > Signed-off-by: Arnd Bergmann <arnd@arndb.de>
>> > > >
>> > > > Does that mean we can also remove the GENERIC_LOCKBREAK config option
>> > > > from the Kconfig files as well?
>> > >
>> > >  I couldn't figure this out.
>> > >
>> > > What I see is that the only architectures setting GENERIC_LOCKBREAK are
>> > > nds32, parisc, powerpc, s390, sh and sparc64, while the only architectures
>> > > implementing arch_spin_is_contended() are arm32, csky and ia64.
>> > >
>> > > The part I don't understand is whether the option actually does anything
>> > > useful any more after commit d89c70356acf ("locking/core: Remove break_lock
>> > > field when CONFIG_GENERIC_LOCKBREAK=y").
>> >
>> > Urgh, what a mess.. AFAICT there's still code in
>> > kernel/locking/spinlock.c that relies on it. Specifically when
>> > GENERIC_LOCKBREAK=y we seem to create _lock*() variants that are
>> > basically TaS locks which drop preempt/irq disable while spinning.
>> >
>> > Anybody having this on and not having native TaS locks is in for a rude
>> > surprise I suppose... sparc64 being the obvious candidate there :/
>> 
>> Is this a problem on s390 and powerpc, those two being the ones
>> that matter in practice?
>> 
>> On s390, we pick between the cmpxchg() based directed-yield when
>> running on virtualized CPUs, and a normal qspinlock when running on a
>> dedicated CPU.
>> 
>> On PowerPC, we pick at compile-time between either the qspinlock
>> (default-enabled on Book3S-64, i.e. all server chips) or a ll/sc based
>> spinlock plus vm_yield() (default on embedded and 32-bit mac).
>
> Urgh, yeah, so this crud undermines the whole point of having a fair
> lock. I'm thinking s390 and Power want to have this fixed.

Our Kconfig has:

  config GENERIC_LOCKBREAK
  	bool
  	default y
  	depends on SMP && PREEMPTION

And we have exactly one defconfig that enables both SMP and PREEMPT,
arch/powerpc/configs/85xx/ge_imp3a_defconfig, which is some ~10 year old
PCI card embedded thing I've never heard of. High chance anyone who has
those is not running upstream kernels on them.
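
That is easy to re-check against the tree; a pipeline along these
lines should list any powerpc defconfig that enables both options:

	$ grep -rl 'CONFIG_PREEMPT=y' arch/powerpc/configs | \
		xargs grep -l 'CONFIG_SMP=y'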

So I think we'd be happy for you to rip GENERIC_LOCKBREAK out; it's
almost entirely unused on powerpc anyway.

cheers

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [tip: locking/core] locking: Remove spin_lock_flags() etc
  2021-10-22 11:59 [PATCH] locking: remove spin_lock_flags() etc Arnd Bergmann
  2021-10-22 14:10 ` Helge Deller
  2021-10-23  1:37 ` Waiman Long
@ 2021-10-30 14:47 ` tip-bot2 for Arnd Bergmann
  2 siblings, 0 replies; 13+ messages in thread
From: tip-bot2 for Arnd Bergmann @ 2021-10-30 14:47 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Arnd Bergmann, Peter Zijlstra (Intel), Helge Deller, x86, linux-kernel

The following commit has been merged into the locking/core branch of tip:

Commit-ID:     f98a3dccfcb0b9b9c3bef8df9edd61cda80ad937
Gitweb:        https://git.kernel.org/tip/f98a3dccfcb0b9b9c3bef8df9edd61cda80ad937
Author:        Arnd Bergmann <arnd@arndb.de>
AuthorDate:    Fri, 22 Oct 2021 13:59:38 +02:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Sat, 30 Oct 2021 16:37:28 +02:00

locking: Remove spin_lock_flags() etc

parisc, ia64 and powerpc32 are the only remaining architectures that
provide custom arch_{spin,read,write}_lock_flags() functions, which are
meant to re-enable interrupts while waiting for a spinlock.

However, none of these can actually run into this codepath, because
it is only called on architectures without CONFIG_GENERIC_LOCKBREAK,
or when CONFIG_DEBUG_LOCK_ALLOC is set without CONFIG_LOCKDEP, and none
of those combinations are possible on the three architectures.

Going back in the git history, it appears that arch/mn10300 may have
been able to run into this code path, but there is a good chance that
it never worked. On the architectures that still exist, it was
already impossible to hit back in 2008 after the introduction of
CONFIG_GENERIC_LOCKBREAK, and possibly earlier.

As this is all dead code, just remove it and the helper functions built
around it. For arch/ia64, the inline asm could be cleaned up, but
it seems safer to leave it untouched.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Helge Deller <deller@gmx.de>  # parisc
Link: https://lore.kernel.org/r/20211022120058.1031690-1-arnd@kernel.org
---
 arch/ia64/include/asm/spinlock.h           | 23 +++++----------------
 arch/openrisc/include/asm/spinlock.h       |  3 +---
 arch/parisc/include/asm/spinlock.h         | 15 +--------------
 arch/powerpc/include/asm/simple_spinlock.h | 21 +-------------------
 arch/s390/include/asm/spinlock.h           |  8 +-------
 include/linux/lockdep.h                    | 17 +----------------
 include/linux/rwlock.h                     | 15 +--------------
 include/linux/rwlock_api_smp.h             |  6 +----
 include/linux/spinlock.h                   | 13 +------------
 include/linux/spinlock_api_smp.h           |  9 +--------
 include/linux/spinlock_up.h                |  1 +-
 kernel/locking/spinlock.c                  |  3 +--
 12 files changed, 9 insertions(+), 125 deletions(-)

diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 8647759..0e5c1ad 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -124,18 +124,13 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
-						  unsigned long flags)
-{
-	arch_spin_lock(lock);
-}
-#define arch_spin_lock_flags	arch_spin_lock_flags
-
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_read_lock(arch_rwlock_t *lock)
 {
+	unsigned long flags = 0;
+
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1,%2\n"
 		"br.few 3f\n"
@@ -157,13 +152,8 @@ arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 		: "p6", "p7", "r2", "memory");
 }
 
-#define arch_read_lock_flags arch_read_lock_flags
-#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
-
 #else /* !ASM_SUPPORTED */
 
-#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-
 #define arch_read_lock(rw)								\
 do {											\
 	arch_rwlock_t *__read_lock_ptr = (rw);						\
@@ -186,8 +176,10 @@ do {								\
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_write_lock(arch_rwlock_t *lock)
 {
+	unsigned long flags = 0;
+
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1, %2\n"
 		"mov ar.ccv = r0\n"
@@ -210,9 +202,6 @@ arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
-#define arch_write_lock_flags arch_write_lock_flags
-#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
-
 #define arch_write_trylock(rw)							\
 ({										\
 	register long result;							\
diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h
index a8940bd..264944a 100644
--- a/arch/openrisc/include/asm/spinlock.h
+++ b/arch/openrisc/include/asm/spinlock.h
@@ -19,9 +19,6 @@
 
 #include <asm/qrwlock.h>
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #define arch_spin_relax(lock)	cpu_relax()
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index fa5ee8a..a6e5d66 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -23,21 +23,6 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
 			continue;
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *x,
-					unsigned long flags)
-{
-	volatile unsigned int *a;
-
-	a = __ldcw_align(x);
-	while (__ldcw(a) == 0)
-		while (*a == 0)
-			if (flags & PSW_SM_I) {
-				local_irq_enable();
-				local_irq_disable();
-			}
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 8985791..7ae6aee 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -123,27 +123,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	}
 }
 
-static inline
-void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-	unsigned long flags_dis;
-
-	while (1) {
-		if (likely(__arch_spin_trylock(lock) == 0))
-			break;
-		local_save_flags(flags_dis);
-		local_irq_restore(flags);
-		do {
-			HMT_low();
-			if (is_shared_processor())
-				splpar_spin_yield(lock);
-		} while (unlikely(lock->slock != 0));
-		HMT_medium();
-		local_irq_restore(flags_dis);
-	}
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__("# arch_spin_unlock\n\t"
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index ef59588..888a2f1 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -67,14 +67,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lp)
 		arch_spin_lock_wait(lp);
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
-					unsigned long flags)
-{
-	if (!arch_spin_trylock_once(lp))
-		arch_spin_lock_wait(lp);
-}
-#define arch_spin_lock_flags	arch_spin_lock_flags
-
 static inline int arch_spin_trylock(arch_spinlock_t *lp)
 {
 	if (!arch_spin_trylock_once(lp))
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 9fe165b..467b942 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -481,23 +481,6 @@ do {								\
 
 #endif /* CONFIG_LOCK_STAT */
 
-#ifdef CONFIG_LOCKDEP
-
-/*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_*_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
-	LOCK_CONTENDED((_lock), (try), (lock))
-
-#else /* CONFIG_LOCKDEP */
-
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
-	lockfl((_lock), (flags))
-
-#endif /* CONFIG_LOCKDEP */
-
 #ifdef CONFIG_PROVE_LOCKING
 extern void print_irqtrace_events(struct task_struct *curr);
 #else
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 7ce9a51..2c0ad41 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -30,31 +30,16 @@ do {								\
 
 #ifdef CONFIG_DEBUG_SPINLOCK
  extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
  extern int do_raw_read_trylock(rwlock_t *lock);
  extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
  extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
  extern int do_raw_write_trylock(rwlock_t *lock);
  extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
 #else
-
-#ifndef arch_read_lock_flags
-# define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
-#endif
-
-#ifndef arch_write_lock_flags
-# define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
-#endif
-
 # define do_raw_read_lock(rwlock)	do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_read_lock_flags(lock, flags) \
-		do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_read_trylock(rwlock)	arch_read_trylock(&(rwlock)->raw_lock)
 # define do_raw_read_unlock(rwlock)	do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 # define do_raw_write_lock(rwlock)	do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_write_lock_flags(lock, flags) \
-		do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_write_trylock(rwlock)	arch_write_trylock(&(rwlock)->raw_lock)
 # define do_raw_write_unlock(rwlock)	do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 #endif
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index abfb53a..f1db6f1 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -157,8 +157,7 @@ static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
 	local_irq_save(flags);
 	preempt_disable();
 	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
-			     do_raw_read_lock_flags, &flags);
+	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
 	return flags;
 }
 
@@ -184,8 +183,7 @@ static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
 	local_irq_save(flags);
 	preempt_disable();
 	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
-			     do_raw_write_lock_flags, &flags);
+	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
 	return flags;
 }
 
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 45310ea..f044706 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -177,7 +177,6 @@ do {									\
 
 #ifdef CONFIG_DEBUG_SPINLOCK
  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
-#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
 #else
@@ -188,18 +187,6 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
 	mmiowb_spin_lock();
 }
 
-#ifndef arch_spin_lock_flags
-#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
-#endif
-
-static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
-{
-	__acquire(lock);
-	arch_spin_lock_flags(&lock->raw_lock, *flags);
-	mmiowb_spin_lock();
-}
-
 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int ret = arch_spin_trylock(&(lock)->raw_lock);
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 6b8e1a0..51fa0da 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -108,16 +108,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
 	local_irq_save(flags);
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	/*
-	 * On lockdep we dont want the hand-coded irq-enable of
-	 * do_raw_spin_lock_flags() code, because lockdep assumes
-	 * that interrupts are not re-enabled during lock-acquire:
-	 */
-#ifdef CONFIG_LOCKDEP
 	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
-#else
-	do_raw_spin_lock_flags(lock, &flags);
-#endif
 	return flags;
 }
 
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 0ac9112..1652107 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -62,7 +62,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 #define arch_spin_is_locked(lock)	((void)(lock), 0)
 /* for sched/core.c and kernel_lock.c: */
 # define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
 # define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
 # define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index c5830cf..b562f92 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -378,8 +378,7 @@ unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
 	local_irq_save(flags);
 	preempt_disable();
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
-				do_raw_spin_lock_flags, &flags);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 	return flags;
 }
 EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

^ permalink raw reply related	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2021-10-30 14:47 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-10-22 11:59 [PATCH] locking: remove spin_lock_flags() etc Arnd Bergmann
2021-10-22 14:10 ` Helge Deller
2021-10-23  1:37 ` Waiman Long
2021-10-23 16:04   ` Arnd Bergmann
2021-10-25  9:57     ` Peter Zijlstra
2021-10-25 10:06       ` Peter Zijlstra
2021-10-25 13:06       ` Arnd Bergmann
2021-10-25 14:33         ` Peter Zijlstra
2021-10-27 12:01           ` Michael Ellerman
2021-10-25 15:28         ` Waiman Long
2021-10-25 15:44           ` Arnd Bergmann
2021-10-25 18:25             ` Waiman Long
2021-10-30 14:47 ` [tip: locking/core] locking: Remove " tip-bot2 for Arnd Bergmann

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).