[PATCH v5] locking/pvqspinlock: Relax cmpxchg's to improve performance on some archs

From: Waiman Long @ 2017-02-23 14:13 UTC
To: Peter Zijlstra, Ingo Molnar
Cc: linux-kernel, Pan Xinhui, Boqun Feng, Andrea Parri, Waiman Long

All the locking-related cmpxchg() calls in the following functions are
replaced with their _acquire variants:
 - pv_queued_spin_steal_lock()
 - trylock_clear_pending()

This change should help performance on architectures that use LL/SC.
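
To make the gain concrete, here is a minimal userspace analogue of the
two cmpxchg() flavors using C11 atomics. This is only a sketch: the
function names and the byte-sized lock are illustrative, and the kernel
primitives are not exactly these library calls. The point is that a
fully ordered CAS makes LL/SC architectures such as PPC emit a
heavyweight barrier before the LL/SC loop, while an acquire CAS lets
them drop it:

	#include <stdatomic.h>
	#include <stdbool.h>

	static _Atomic unsigned char locked;	/* stand-in for l->locked */

	/* Rough analogue of cmpxchg(): fully ordered on success. */
	static bool trylock_full(void)
	{
		unsigned char zero = 0;

		return atomic_compare_exchange_strong_explicit(&locked,
				&zero, 1, memory_order_seq_cst,
				memory_order_relaxed);
	}

	/*
	 * Rough analogue of cmpxchg_acquire(): ACQUIRE ordering only,
	 * which is still enough to order the critical section after a
	 * successful lock acquisition.
	 */
	static bool trylock_acquire(void)
	{
		unsigned char zero = 0;

		return atomic_compare_exchange_strong_explicit(&locked,
				&zero, 1, memory_order_acquire,
				memory_order_relaxed);
	}

Acquire ordering is all a lock acquisition needs, so the leading
barrier is pure overhead on the fast path; dropping it is what the
numbers below measure.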

On a 2-core 16-thread Power8 system with pvqspinlock explicitly
enabled, the performance of a locking microbenchmark with and without
this patch on a 4.10-rc8 kernel with Xinhui's PPC qspinlock patch
was as follows:

  # of threads    w/o patch    with patch      % Change
  ------------    ---------    ----------      --------
       4         4053.3 Mop/s  4223.7 Mop/s     +4.2%
       8         3310.4 Mop/s  3406.0 Mop/s     +2.9%
      12         2576.4 Mop/s  2674.6 Mop/s     +3.8%

Signed-off-by: Waiman Long <longman@redhat.com>
---
 v4->v5:
  - Correct some grammatical issues in the comment.

 v3->v4:
  - Update the comment in pv_kick_node() to mention that the code
    may not work on some archs.

 v2->v3:
  - Reduce scope by relaxing cmpxchg's in fast path only.

 v1->v2:
  - Add comments in changelog and code for the rationale of the change.

 kernel/locking/qspinlock_paravirt.h | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index e6b2f7a..4614e39 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -72,7 +72,7 @@ static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
 	struct __qspinlock *l = (void *)lock;
 
 	if (!(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
-	    (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
+	    (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
 		qstat_inc(qstat_pv_lock_stealing, true);
 		return true;
 	}
@@ -101,16 +101,16 @@ static __always_inline void clear_pending(struct qspinlock *lock)
 
 /*
  * The pending bit check in pv_queued_spin_steal_lock() isn't a memory
- * barrier. Therefore, an atomic cmpxchg() is used to acquire the lock
- * just to be sure that it will get it.
+ * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the
+ * lock just to be sure that it will get it.
  */
 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 {
 	struct __qspinlock *l = (void *)lock;
 
 	return !READ_ONCE(l->locked) &&
-	       (cmpxchg(&l->locked_pending, _Q_PENDING_VAL, _Q_LOCKED_VAL)
-			== _Q_PENDING_VAL);
+	       (cmpxchg_acquire(&l->locked_pending, _Q_PENDING_VAL,
+				_Q_LOCKED_VAL) == _Q_PENDING_VAL);
 }
 #else /* _Q_PENDING_BITS == 8 */
 static __always_inline void set_pending(struct qspinlock *lock)
@@ -138,7 +138,7 @@ static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 		 */
 		old = val;
 		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
-		val = atomic_cmpxchg(&lock->val, old, new);
+		val = atomic_cmpxchg_acquire(&lock->val, old, new);
 
 		if (val == old)
 			return 1;
@@ -361,6 +361,13 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 	 * observe its next->locked value and advance itself.
 	 *
 	 * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
+	 *
+	 * The write to next->locked in arch_mcs_spin_unlock_contended()
+	 * must be ordered before the read of pn->state in the cmpxchg()
+	 * below for the code to work correctly. However, this is not
+	 * guaranteed on all architectures when the cmpxchg() call fails.
+	 * Both x86 and PPC can provide that guarantee, but other
+	 * architectures may not.
 	 */
 	if (cmpxchg(&pn->state, vcpu_halted, vcpu_hashed) != vcpu_halted)
 		return;
-- 
1.8.3.1
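
A side note on the failure-ordering caveat added to pv_kick_node()
above: ordering a prior store before a later load in the same thread
needs a full (StoreLoad) barrier, which an acquire-only operation does
not provide; that is why this particular cmpxchg() is not relaxed. The
sketch below restates the requirement in C11 terms. All names are
illustrative stand-ins, not kernel code:

	#include <stdatomic.h>

	static _Atomic int next_locked;	/* stand-in for next->locked */
	static _Atomic int pn_state;	/* stand-in for pn->state */

	enum { VCPU_HALTED, VCPU_HASHED };	/* stand-in for vcpu_* */

	static void kick_node_sketch(void)
	{
		int expected = VCPU_HALTED;

		/* The write arch_mcs_spin_unlock_contended() performs. */
		atomic_store_explicit(&next_locked, 1, memory_order_release);

		/*
		 * A full fence orders the store above before the load
		 * inside the CAS below even when the CAS fails. The
		 * kernel relies on the full cmpxchg() providing this on
		 * x86 and PPC; other architectures may not guarantee it.
		 */
		atomic_thread_fence(memory_order_seq_cst);

		if (!atomic_compare_exchange_strong_explicit(&pn_state,
				&expected, VCPU_HASHED,
				memory_order_seq_cst,
				memory_order_seq_cst))
			return;	/* vCPU was not halted; nothing to do */
	}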

Re: [PATCH v5] locking/pvqspinlock: Relax cmpxchg's to improve performance on some archs

From: Pan Xinhui @ 2017-02-27 17:06 UTC
To: Waiman Long, Peter Zijlstra, Ingo Molnar
Cc: linux-kernel, Boqun Feng, Andrea Parri



On 2017/2/23 22:13, Waiman Long wrote:
> All the locking-related cmpxchg() calls in the following functions are
> replaced with their _acquire variants:
>  - pv_queued_spin_steal_lock()
>  - trylock_clear_pending()
> [...]
Works on my side :)

Reviewed-by: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
