From: Jordan Niethe <jniethe5@gmail.com>
To: Nicholas Piggin <npiggin@gmail.com>, linuxppc-dev@lists.ozlabs.org
Subject: Re: [PATCH 13/17] powerpc/qspinlock: trylock and initial lock attempt may steal
Date: Thu, 10 Nov 2022 11:43:02 +1100
Message-ID: <102be7ce979d8edc5eec56881a5d14c1eb2211b2.camel@gmail.com>
In-Reply-To: <20220728063120.2867508-15-npiggin@gmail.com>

On Thu, 2022-07-28 at 16:31 +1000, Nicholas Piggin wrote:
[resend as utf-8, not utf-7]
> This gives trylock slightly more strength, and it also gives most
> of the benefit of passing 'val' back through the slowpath without
> the complexity.
> ---
>  arch/powerpc/include/asm/qspinlock.h | 39 +++++++++++++++++++++++++++-
>  arch/powerpc/lib/qspinlock.c         |  9 +++++++
>  2 files changed, 47 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
> index 44601b261e08..d3d2039237b2 100644
> --- a/arch/powerpc/include/asm/qspinlock.h
> +++ b/arch/powerpc/include/asm/qspinlock.h
> @@ -5,6 +5,8 @@
>  #include <linux/compiler.h>
>  #include <asm/qspinlock_types.h>
>  
> +#define _Q_SPIN_TRY_LOCK_STEAL 1

Would this be a config option?
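
Something like this is what I had in mind (the CONFIG symbol name here is
just hypothetical):

	#ifdef CONFIG_PPC_QSPINLOCK_TRYLOCK_STEAL	/* hypothetical symbol */
	#define _Q_SPIN_TRY_LOCK_STEAL 1
	#else
	#define _Q_SPIN_TRY_LOCK_STEAL 0
	#endif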

> +
>  static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
>  {
>  	return READ_ONCE(lock->val);
> @@ -26,11 +28,12 @@ static __always_inline u32 queued_spin_get_locked_val(void)
>  	return _Q_LOCKED_VAL | (smp_processor_id() << _Q_OWNER_CPU_OFFSET);
>  }
>  
> -static __always_inline int queued_spin_trylock(struct qspinlock *lock)
> +static __always_inline int __queued_spin_trylock_nosteal(struct qspinlock *lock)
>  {
>  	u32 new = queued_spin_get_locked_val();
>  	u32 prev;
>  
> +	/* Trylock succeeds only when unlocked and no queued nodes */
>  	asm volatile(
>  "1:	lwarx	%0,0,%1,%3	# queued_spin_trylock			\n"

s/queued_spin_trylock/__queued_spin_trylock_nosteal

>  "	cmpwi	0,%0,0							\n"
> @@ -49,6 +52,40 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock)
>  	return 0;
>  }
>  
> +static __always_inline int __queued_spin_trylock_steal(struct qspinlock *lock)
> +{
> +	u32 new = queued_spin_get_locked_val();
> +	u32 prev, tmp;
> +
> +	/* Trylock may get ahead of queued nodes if it finds unlocked */
> +	asm volatile(
> +"1:	lwarx	%0,0,%2,%5	# queued_spin_trylock			\n"

s/queued_spin_trylock/__queued_spin_trylock_steal

> +"	andc.	%1,%0,%4						\n"
> +"	bne-	2f							\n"
> +"	and	%1,%0,%4						\n"
> +"	or	%1,%1,%3						\n"
> +"	stwcx.	%1,0,%2							\n"
> +"	bne-	1b							\n"
> +"\t"	PPC_ACQUIRE_BARRIER "						\n"
> +"2:									\n"

Just because there's a little bit more going on here...

_Q_TAIL_CPU_MASK  = 0xFFFE0000
~_Q_TAIL_CPU_MASK = 0x0001FFFF


1:	lwarx	prev, 0, &lock->val, IS_ENABLED(CONFIG_PPC64)
	andc.	tmp, prev, _Q_TAIL_CPU_MASK	(tmp = prev & ~_Q_TAIL_CPU_MASK)
	bne-	2f				(exit if locked)
	and	tmp, prev, _Q_TAIL_CPU_MASK	(tmp = prev & _Q_TAIL_CPU_MASK)
	or	tmp, tmp, new			(tmp |= new)
	stwcx.	tmp, 0, &lock->val
	bne-	1b
	PPC_ACQUIRE_BARRIER
2:

... which seems correct.
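
In C terms the loop is roughly the following (just a sketch to check my
reading, treating lock->val as a plain u32 and using cmpxchg_acquire() to
stand in for the larx/stcx. reservation):

	do {
		prev = READ_ONCE(lock->val);
		if (prev & ~_Q_TAIL_CPU_MASK)		/* locked: fail */
			return 0;
		/* keep the tail bits, set locked + owner */
		tmp = (prev & _Q_TAIL_CPU_MASK) | new;
	} while (cmpxchg_acquire(&lock->val, prev, tmp) != prev);

	return 1;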


> +	: "=&r" (prev), "=&r" (tmp)
> +	: "r" (&lock->val), "r" (new), "r" (_Q_TAIL_CPU_MASK),
> +	  "i" (IS_ENABLED(CONFIG_PPC64) ? 1 : 0)
> +	: "cr0", "memory");
> +
> +	if (likely(!(prev & ~_Q_TAIL_CPU_MASK)))
> +		return 1;
> +	return 0;
> +}
> +
> +static __always_inline int queued_spin_trylock(struct qspinlock *lock)
> +{
> +	if (!_Q_SPIN_TRY_LOCK_STEAL)
> +		return __queued_spin_trylock_nosteal(lock);
> +	else
> +		return __queued_spin_trylock_steal(lock);
> +}
> +
>  void queued_spin_lock_slowpath(struct qspinlock *lock);
>  
>  static __always_inline void queued_spin_lock(struct qspinlock *lock)
> diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
> index 3b10e31bcf0a..277aef1fab0a 100644
> --- a/arch/powerpc/lib/qspinlock.c
> +++ b/arch/powerpc/lib/qspinlock.c
> @@ -24,7 +24,11 @@ struct qnodes {
>  
>  /* Tuning parameters */
>  static int STEAL_SPINS __read_mostly = (1<<5);
> +#if _Q_SPIN_TRY_LOCK_STEAL == 1
> +static const bool MAYBE_STEALERS = true;
> +#else
>  static bool MAYBE_STEALERS __read_mostly = true;
> +#endif
>  static int HEAD_SPINS __read_mostly = (1<<8);
>  
>  static bool pv_yield_owner __read_mostly = true;
> @@ -522,6 +526,10 @@ void pv_spinlocks_init(void)
>  #include <linux/debugfs.h>
>  static int steal_spins_set(void *data, u64 val)
>  {
> +#if _Q_SPIN_TRY_LOCK_STEAL == 1
> +	/* MAYBE_STEALERS remains true */
> +	STEAL_SPINS = val;
> +#else
>  	static DEFINE_MUTEX(lock);
>  
>  	mutex_lock(&lock);
> @@ -539,6 +547,7 @@ static int steal_spins_set(void *data, u64 val)
>  		STEAL_SPINS = val;
>  	}
>  	mutex_unlock(&lock);
> +#endif
>  
>  	return 0;
>  }
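
One nice property of making MAYBE_STEALERS a compile-time constant is that
the compiler can discard the !MAYBE_STEALERS queueing paths entirely, i.e.
(sketch only):

	if (!MAYBE_STEALERS) {
		/* no-stealers path: dead code when the const is true */
	} else {
		/* steal-tolerant path: the only code emitted */
	}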

