From: Waiman Long <Waiman.Long@hp.com>
To: Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, "H. Peter Anvin" <hpa@zytor.com>,
	Peter Zijlstra <peterz@infradead.org>
Cc: linux-arch@vger.kernel.org, Waiman Long <Waiman.Long@hp.com>,
	Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>,
	Gleb Natapov <gleb@redhat.com>,
	kvm@vger.kernel.org, Scott J Norton <scott.norton@hp.com>,
	x86@kernel.org, Paolo Bonzini <paolo.bonzini@gmail.com>,
	linux-kernel@vger.kernel.org,
	virtualization@lists.linux-foundation.org,
	Chegu Vinod <chegu_vinod@hp.com>,
	David Vrabel <david.vrabel@citrix.com>,
	Oleg Nesterov <oleg@redhat.com>,
	xen-devel@lists.xenproject.org,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>,
	Linus Torvalds <torvalds@linux-foundation.org>
Subject: [PATCH v11 13/16] pvqspinlock: Enable coexistence with the unfair lock
Date: Fri, 30 May 2014 11:43:59 -0400	[thread overview]
Message-ID: <1401464642-33890-14-git-send-email-Waiman.Long@hp.com> (raw)
In-Reply-To: <1401464642-33890-1-git-send-email-Waiman.Long@hp.com>

This patch enables the PV qspinlock and the unfair lock to coexist.
When both are enabled, only the lock fastpath will perform lock
stealing; the slowpath will have lock stealing disabled, so that we
get the best of both features.

We also need to transition a CPU that has been spinning too long in
the pending bit code path back to the regular queuing code path, so
that it can be properly halted by the PV qspinlock code.
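
Again for illustration only, a minimal user-space sketch of that
bounded spin (the demo_* name is hypothetical; the in-kernel version
operates on the qspinlock value word as shown in the diff below):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define PSPIN_THRESHOLD     (1 << 10)   /* same value as the patch */

    /*
     * Poll the lock word at most PSPIN_THRESHOLD times, then report
     * failure so that the caller falls back to the queuing path,
     * where the PV code can halt the vCPU instead of letting it
     * spin indefinitely.
     */
    static bool demo_pending_spin(atomic_int *locked)
    {
            int retry = PSPIN_THRESHOLD;

            while (atomic_load_explicit(locked, memory_order_acquire)) {
                    if (--retry <= 0)
                            return false;   /* give up, go queue */
            }
            return true;                    /* observed unlocked */
    }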

Signed-off-by: Waiman Long <Waiman.Long@hp.com>
---
 kernel/locking/qspinlock.c |   47 ++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 43 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 93c663a..8deedcf 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -57,12 +57,24 @@
 #include "mcs_spinlock.h"
 
 /*
+ * Check the pending bit spinning threshold only if PV qspinlock is enabled
+ */
+#define PSPIN_THRESHOLD		(1 << 10)
+#define MAX_NODES		4
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define pv_qspinlock_enabled()	static_key_false(&paravirt_spinlocks_enabled)
+#else
+#define pv_qspinlock_enabled()	false
+#endif
+
+/*
  * Per-CPU queue node structures; we can never have more than 4 nested
  * contexts: task, softirq, hardirq, nmi.
  *
  * Exactly fits one cacheline.
  */
-static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[4]);
+static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
 
 /*
  * We must be able to distinguish between no-tail and the tail at 0:0,
@@ -265,6 +277,9 @@ static noinline void queue_spin_lock_slowerpath(struct qspinlock *lock,
 		ACCESS_ONCE(prev->next) = node;
 
 		arch_mcs_spin_lock_contended(&node->locked);
+	} else {
+		/* Mark it as the queue head */
+		ACCESS_ONCE(node->locked) = true;
 	}
 
 	/*
@@ -344,14 +359,17 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	struct mcs_spinlock *node;
 	u32 new, old, tail;
 	int idx;
+	int retry = INT_MAX;	/* Retry count, queue if <= 0 */
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
 #ifdef CONFIG_VIRT_UNFAIR_LOCKS
 	/*
 	 * A simple test and set unfair lock
+	 * Disable waiter lock stealing if PV spinlock is enabled
 	 */
-	if (static_key_false(&virt_unfairlocks_enabled)) {
+	if (!pv_qspinlock_enabled() &&
+	    static_key_false(&virt_unfairlocks_enabled)) {
 		cpu_relax();	/* Relax after a failed lock attempt */
 		while (!queue_spin_trylock(lock))
 			cpu_relax();
@@ -360,6 +378,14 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 #endif /* CONFIG_VIRT_UNFAIR_LOCKS */
 
 	/*
+	 * When PV qspinlock is enabled, exit the pending bit code path and
+	 * go back to the regular queuing path if the lock isn't available
+	 * within a certain threshold.
+	 */
+	if (pv_qspinlock_enabled())
+		retry = PSPIN_THRESHOLD;
+
+	/*
 	 * trylock || pending
 	 *
 	 * 0,0,0 -> 0,0,1 ; trylock
@@ -370,7 +396,7 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 		 * If we observe that the queue is not empty or both
 		 * the pending and lock bits are set, queue
 		 */
-		if ((val & _Q_TAIL_MASK) ||
+		if ((val & _Q_TAIL_MASK) || (retry-- <= 0) ||
 		    (val == (_Q_LOCKED_VAL|_Q_PENDING_VAL)))
 			goto queue;
 
@@ -413,8 +439,21 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * sequentiality; this because not all clear_pending_set_locked()
 	 * implementations imply full barriers.
 	 */
-	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK)
+	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK) {
+		if (pv_qspinlock_enabled() && (retry-- <= 0)) {
+			/*
+			 * Clear the pending bit and queue
+			 */
+			for (;;) {
+				new = val & ~_Q_PENDING_MASK;
+				old = atomic_cmpxchg(&lock->val, val, new);
+				if (old == val)
+					goto queue;
+				val = old;
+			}
+		}
 		arch_mutex_cpu_relax();
+	}
 
 	/*
 	 * take ownership and clear the pending bit.
-- 
1.7.1

Thread overview: 102+ messages
2014-05-30 15:43 [PATCH v11 00/16] qspinlock: a 4-byte queue spinlock with PV support Waiman Long
2014-05-30 15:43 ` [PATCH v11 01/16] qspinlock: A simple generic 4-byte queue spinlock Waiman Long
2014-05-30 15:43 ` [PATCH v11 02/16] qspinlock, x86: Enable x86-64 to use " Waiman Long
2014-05-30 15:43 ` [PATCH v11 03/16] qspinlock: Add pending bit Waiman Long
2014-05-30 15:43 ` [PATCH v11 04/16] qspinlock: Extract out the exchange of tail code word Waiman Long
2014-05-30 15:43 ` [PATCH v11 05/16] qspinlock: Optimize for smaller NR_CPUS Waiman Long
2014-05-30 15:43 ` [PATCH v11 06/16] qspinlock: prolong the stay in the pending bit path Waiman Long
2014-06-11 10:26   ` Peter Zijlstra
2014-06-11 21:22     ` Long, Wai Man
2014-06-12  6:00       ` Peter Zijlstra
2014-06-12 20:54         ` Waiman Long
2014-06-15 13:12           ` Peter Zijlstra
2014-05-30 15:43 ` [PATCH v11 07/16] qspinlock: Use a simple write to grab the lock, if applicable Waiman Long
2014-05-30 15:43 ` [PATCH v11 08/16] qspinlock: Prepare for unfair lock support Waiman Long
2014-05-30 15:43 ` [PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest Waiman Long
2014-06-11 10:54   ` Peter Zijlstra
2014-06-11 11:38     ` Peter Zijlstra
2014-06-12  1:37     ` Long, Wai Man
2014-06-12  5:50       ` Peter Zijlstra
2014-06-12 21:08         ` Waiman Long
2014-06-15 13:14           ` Peter Zijlstra
2014-05-30 15:43 ` [PATCH v11 10/16] qspinlock: Split the MCS queuing code into a separate slowerpath Waiman Long
2014-05-30 15:43 ` [PATCH v11 11/16] pvqspinlock, x86: Rename paravirt_ticketlocks_enabled Waiman Long
2014-05-30 15:43 ` [PATCH v11 12/16] pvqspinlock, x86: Add PV data structure & methods Waiman Long
2014-05-30 15:43 ` [PATCH v11 13/16] pvqspinlock: Enable coexistence with the unfair lock Waiman Long [this message]
2014-05-30 15:44 ` [PATCH v11 14/16] pvqspinlock: Add qspinlock para-virtualization support Waiman Long
2014-06-12  8:17   ` Peter Zijlstra
2014-06-12 20:48     ` Waiman Long
2014-06-15 13:16       ` Peter Zijlstra
2014-06-17 20:59         ` Konrad Rzeszutek Wilk
2014-05-30 15:44 ` [PATCH v11 15/16] pvqspinlock, x86: Enable PV qspinlock PV for KVM Waiman Long
2014-05-30 15:44 ` [PATCH v11 16/16] pvqspinlock, x86: Enable PV qspinlock for XEN Waiman Long
