From: Waiman Long <Waiman.Long@hp.com>
To: Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, "H. Peter Anvin" <hpa@zytor.com>,
	Peter Zijlstra <peterz@infradead.org>
Cc: linux-arch@vger.kernel.org, x86@kernel.org,
	linux-kernel@vger.kernel.org,
	virtualization@lists.linux-foundation.org,
	xen-devel@lists.xenproject.org, kvm@vger.kernel.org,
	Paolo Bonzini <paolo.bonzini@gmail.com>,
	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>,
	Rik van Riel <riel@redhat.com>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>,
	David Vrabel <david.vrabel@citrix.com>,
	Oleg Nesterov <oleg@redhat.com>,
	Daniel J Blueman <daniel@numascale.com>,
	Scott J Norton <scott.norton@hp.com>,
	Douglas Hatch <doug.hatch@hp.com>,
	Waiman Long <Waiman.Long@hp.com>
Subject: [PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
Date: Mon,  6 Apr 2015 22:55:44 -0400
Message-ID: <1428375350-9213-10-git-send-email-Waiman.Long@hp.com>
In-Reply-To: <1428375350-9213-1-git-send-email-Waiman.Long@hp.com>

Provide a separate (second) version of queue_spin_lock_slowpath() for
paravirt, along with a special unlock path.

The second slowpath is generated by adding a few pv hooks to the
normal slowpath. Those hooks compile away to nothing in the native
case, but expand into special wait/wake code for the pv version.
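
Condensed into a self-contained toy (all names here -- demo.c, hook(),
GEN_PV, native_slowpath/pv_slowpath -- are invented for illustration;
the real hooks and the re-include are in the diff below), the
generation trick works like this:

/* demo.c -- the file includes itself once, emitting the body twice */
#ifndef GEN_PV
static inline void __native_hook(int *x) { }	/* stub, compiles away */
#define hook		__native_hook
#define slowpath	native_slowpath
#endif

void slowpath(int *x)
{
	hook(x);
	/* ... common slowpath body ... */
}

#ifndef GEN_PV
#define GEN_PV
#undef  hook					/* resolve to the real hook() */
static inline void hook(int *x) { *x = 1; }	/* stand-in for pv wait/wake */
#undef  slowpath
#define slowpath	pv_slowpath
#include "demo.c"				/* emit the body a second time */
#endif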

The MCS queue nodes can use extra storage in the mcs_nodes[] array to
keep track of vcpu state, which makes directed wakeups possible.

The head contender has no such storage directly visible to the
unlocker, so the unlocker instead looks it up in a hash table that
uses open addressing, with a simple binary Galois linear feedback
shift register generating the probe sequence.
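
For reference, a minimal stand-alone sketch of the probe-sequence idea;
the in-kernel helper comes from <linux/lfsr.h> (patch 08/15), and the
8-bit width, 0xB8 tap mask and lfsr8_step() name below are illustrative
choices, not what the kernel helper uses:

#include <stdint.h>

/*
 * One step of a binary Galois LFSR over 8 bits.  With maximal-length
 * taps (0xB8 = x^8 + x^6 + x^5 + x^4 + 1), repeatedly stepping from any
 * non-zero seed visits every non-zero 8-bit value exactly once before
 * repeating, which is what makes an LFSR usable as an open-addressing
 * probe sequence that is guaranteed to terminate.
 */
uint8_t lfsr8_step(uint8_t v)
{
	uint8_t lsb = v & 1;

	v >>= 1;
	if (lsb)
		v ^= 0xB8;
	return v;
}

Hashing the lock pointer picks the initial cacheline of buckets; when
that line is full, the hash value is stepped through the LFSR to pick
the next line, as pv_hash() and pv_hash_find() do in the patch.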

Signed-off-by: Waiman Long <Waiman.Long@hp.com>
---
 kernel/locking/qspinlock.c          |   69 ++++++++-
 kernel/locking/qspinlock_paravirt.h |  321 +++++++++++++++++++++++++++++++++++
 2 files changed, 389 insertions(+), 1 deletions(-)
 create mode 100644 kernel/locking/qspinlock_paravirt.h

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index fc2e5ab..33b3f54 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -18,6 +18,9 @@
  * Authors: Waiman Long <waiman.long@hp.com>
  *          Peter Zijlstra <peterz@infradead.org>
  */
+
+#ifndef _GEN_PV_LOCK_SLOWPATH
+
 #include <linux/smp.h>
 #include <linux/bug.h>
 #include <linux/cpumask.h>
@@ -65,13 +68,21 @@
 
 #include "mcs_spinlock.h"
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define MAX_NODES	8
+#else
+#define MAX_NODES	4
+#endif
+
 /*
  * Per-CPU queue node structures; we can never have more than 4 nested
  * contexts: task, softirq, hardirq, nmi.
  *
  * Exactly fits one 64-byte cacheline on a 64-bit architecture.
+ *
+ * PV doubles the storage and uses the second cacheline for PV state.
  */
-static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[4]);
+static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
 
 /*
  * We must be able to distinguish between no-tail and the tail at 0:0,
@@ -220,6 +231,33 @@ static __always_inline void set_locked(struct qspinlock *lock)
 	WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
 }
 
+
+/*
+ * Generate the native code for queue_spin_lock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock,
+					   struct mcs_spinlock *node) { }
+
+#define pv_enabled()		false
+
+#define pv_init_node		__pv_init_node
+#define pv_wait_node		__pv_wait_node
+#define pv_kick_node		__pv_kick_node
+
+#define pv_wait_head		__pv_wait_head
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define queue_spin_lock_slowpath	native_queue_spin_lock_slowpath
+#endif
+
+#endif /* _GEN_PV_LOCK_SLOWPATH */
+
 /**
  * queue_spin_lock_slowpath - acquire the queue spinlock
  * @lock: Pointer to queue spinlock structure
@@ -249,6 +287,9 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
+	if (pv_enabled())
+		goto queue;
+
 	if (virt_queue_spin_lock(lock))
 		return;
 
@@ -325,6 +366,7 @@ queue:
 	node += idx;
 	node->locked = 0;
 	node->next = NULL;
+	pv_init_node(node);
 
 	/*
 	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
@@ -350,6 +392,7 @@ queue:
 		prev = decode_tail(old);
 		WRITE_ONCE(prev->next, node);
 
+		pv_wait_node(node);
 		arch_mcs_spin_lock_contended(&node->locked);
 	}
 
@@ -365,6 +408,7 @@ queue:
 	 * does not imply a full barrier.
 	 *
 	 */
+	pv_wait_head(lock, node);
 	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK)
 		cpu_relax();
 
@@ -397,6 +441,7 @@ queue:
 		cpu_relax();
 
 	arch_mcs_spin_unlock_contended(&next->locked);
+	pv_kick_node(next);
 
 release:
 	/*
@@ -405,3 +450,25 @@ release:
 	this_cpu_dec(mcs_nodes[0].count);
 }
 EXPORT_SYMBOL(queue_spin_lock_slowpath);
+
+/*
+ * Generate the paravirt code for queue_spin_lock_slowpath().
+ */
+#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#define _GEN_PV_LOCK_SLOWPATH
+
+#undef  pv_enabled
+#define pv_enabled()	true
+
+#undef pv_init_node
+#undef pv_wait_node
+#undef pv_kick_node
+#undef pv_wait_head
+
+#undef  queue_spin_lock_slowpath
+#define queue_spin_lock_slowpath	__pv_queue_spin_lock_slowpath
+
+#include "qspinlock_paravirt.h"
+#include "qspinlock.c"
+
+#endif
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
new file mode 100644
index 0000000..49dbd39
--- /dev/null
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -0,0 +1,321 @@
+#ifndef _GEN_PV_LOCK_SLOWPATH
+#error "do not include this file"
+#endif
+
+/*
+ * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
+ * of spinning them.
+ *
+ * This relies on the architecture to provide two paravirt hypercalls:
+ *
+ *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
+ *   pv_kick(cpu)             -- wakes a suspended vcpu
+ *
+ * Using these we implement __pv_queue_spin_lock_slowpath() and
+ * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and
+ * native_queue_spin_unlock().
+ */
+
+#define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)
+
+enum vcpu_state {
+	vcpu_running = 0,
+	vcpu_halted,
+};
+
+struct pv_node {
+	struct mcs_spinlock	mcs;
+	struct mcs_spinlock	__res[3];
+
+	int			cpu;
+	u8			state;
+};
+
+/*
+ * Hash table using open addressing with an LFSR probe sequence.
+ *
+ * Since we should not be holding locks from NMI context (very rare indeed) the
+ * max load factor is 0.75, which is around the point where open addressing
+ * breaks down.
+ *
+ * Instead of probing just the immediate bucket we probe all buckets in the
+ * same cacheline.
+ *
+ * http://en.wikipedia.org/wiki/Hash_table#Open_addressing
+ *
+ * Dynamically allocate a hash table big enough to hold at least 4X the
+ * number of possible cpus in the system. Allocation is done on page
+ * granularity. So the minimum number of hash buckets should be at least
+ * 256 to fully utilize a 4k page.
+ */
+#define LFSR_MIN_BITS	8
+#define	LFSR_MAX_BITS	(2 + NR_CPUS_BITS)
+#if LFSR_MAX_BITS < LFSR_MIN_BITS
+#undef  LFSR_MAX_BITS
+#define LFSR_MAX_BITS	LFSR_MIN_BITS
+#endif
+
+struct pv_hash_bucket {
+	struct qspinlock *lock;
+	struct pv_node   *node;
+};
+#define PV_HB_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
+#define HB_RESERVED	((struct qspinlock *)1)
+
+static struct pv_hash_bucket *pv_lock_hash;
+static unsigned int pv_lock_hash_bits __read_mostly;
+
+#include <linux/hash.h>
+#include <linux/lfsr.h>
+#include <linux/bootmem.h>
+
+/*
+ * Allocate memory for the PV qspinlock hash buckets
+ *
+ * This function should be called from the paravirt spinlock initialization
+ * routine.
+ */
+void __init __pv_init_lock_hash(void)
+{
+	int pv_hash_size = 4 * num_possible_cpus();
+
+	if (pv_hash_size < (1U << LFSR_MIN_BITS))
+		pv_hash_size = (1U << LFSR_MIN_BITS);
+	/*
+	 * Allocate space from bootmem which should be page-size aligned
+	 * and hence cacheline aligned.
+	 */
+	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
+					       sizeof(struct pv_hash_bucket),
+					       pv_hash_size, 0, HASH_EARLY,
+					       &pv_lock_hash_bits, NULL,
+					       pv_hash_size, pv_hash_size);
+}
+
+static inline u32 hash_align(u32 hash)
+{
+	return hash & ~(PV_HB_PER_LINE - 1);
+}
+
+static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
+{
+	unsigned long init_hash, hash = hash_ptr(lock, pv_lock_hash_bits);
+	struct pv_hash_bucket *hb, *end;
+
+	if (!hash)
+		hash = 1;
+
+	init_hash = hash;
+	hb = &pv_lock_hash[hash_align(hash)];
+	for (;;) {
+		for (end = hb + PV_HB_PER_LINE; hb < end; hb++) {
+			if (!cmpxchg(&hb->lock, NULL, lock)) {
+				WRITE_ONCE(hb->node, node);
+				/*
+				 * We haven't set the _Q_SLOW_VAL yet. So
+				 * the order of writing doesn't matter.
+				 */
+				smp_wmb(); /* matches rmb from pv_hash_find */
+				goto done;
+			}
+		}
+
+		hash = lfsr(hash, pv_lock_hash_bits, 0);
+		hb = &pv_lock_hash[hash_align(hash)];
+		BUG_ON(hash == init_hash);
+	}
+
+done:
+	return &hb->lock;
+}
+
+static struct pv_node *pv_hash_find(struct qspinlock *lock)
+{
+	unsigned long init_hash, hash = hash_ptr(lock, pv_lock_hash_bits);
+	struct pv_hash_bucket *hb, *end;
+	struct pv_node *node = NULL;
+
+	if (!hash)
+		hash = 1;
+
+	init_hash = hash;
+	hb = &pv_lock_hash[hash_align(hash)];
+	for (;;) {
+		for (end = hb + PV_HB_PER_LINE; hb < end; hb++) {
+			struct qspinlock *l = READ_ONCE(hb->lock);
+
+			if (l == lock) {
+				smp_rmb(); /* matches wmb from pv_hash() */
+				node = READ_ONCE(hb->node);
+				goto done;
+			}
+		}
+
+		hash = lfsr(hash, pv_lock_hash_bits, 0);
+		hb = &pv_lock_hash[hash_align(hash)];
+		BUG_ON(hash == init_hash);
+	}
+done:
+	/*
+	 * Clear the hash bucket
+	 */
+	WRITE_ONCE(hb->lock, NULL);
+	return node;
+}
+
+/*
+ * Initialize the PV part of the mcs_spinlock node.
+ */
+static void pv_init_node(struct mcs_spinlock *node)
+{
+	struct pv_node *pn = (struct pv_node *)node;
+
+	BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));
+
+	pn->cpu = smp_processor_id();
+	pn->state = vcpu_running;
+}
+
+/*
+ * Wait for node->locked to become true, halt the vcpu after a short spin.
+ * pv_kick_node() is used to wake the vcpu again.
+ */
+static void pv_wait_node(struct mcs_spinlock *node)
+{
+	struct pv_node *pn = (struct pv_node *)node;
+	int loop;
+
+	for (;;) {
+		for (loop = SPIN_THRESHOLD; loop; loop--) {
+			if (READ_ONCE(node->locked))
+				return;
+
+			cpu_relax();
+		}
+
+		/*
+		 * Order pn->state vs pn->locked thusly:
+		 *
+		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
+		 *     MB			      MB
+		 * [L] pn->locked		[RmW] pn->state = vcpu_running
+		 *
+		 * Matches the xchg() from pv_kick_node().
+		 */
+		(void)xchg(&pn->state, vcpu_halted);
+
+		if (!READ_ONCE(node->locked))
+			pv_wait(&pn->state, vcpu_halted);
+
+		/* Make sure that state is correct for spurious wakeup */
+		WRITE_ONCE(pn->state, vcpu_running);
+	}
+
+	/*
+	 * By now our node->locked should be 1 and our caller will not actually
+	 * spin-wait for it. We do however rely on our caller to do a
+	 * load-acquire for us.
+	 */
+}
+
+/*
+ * Called after setting next->locked = 1, used to wake those stuck in
+ * pv_wait_node().
+ */
+static void pv_kick_node(struct mcs_spinlock *node)
+{
+	struct pv_node *pn = (struct pv_node *)node;
+
+	/*
+	 * Note that because node->locked is already set, this actual
+	 * mcs_spinlock entry could be re-used already.
+	 *
+	 * This should be fine however, kicking people for no reason is
+	 * harmless.
+	 *
+	 * See the comment in pv_wait_node().
+	 */
+	if (xchg(&pn->state, vcpu_running) == vcpu_halted)
+		pv_kick(pn->cpu);
+}
+
+/*
+ * Wait for l->locked to become clear; halt the vcpu after a short spin.
+ * __pv_queue_spin_unlock() will wake us.
+ */
+static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
+{
+	struct __qspinlock *l = (void *)lock;
+	struct qspinlock **lp = NULL;
+	struct pv_node *pn = (struct pv_node *)node;
+	int slow_set = false;
+	int loop;
+
+	for (;;) {
+		for (loop = SPIN_THRESHOLD; loop; loop--) {
+			if (!READ_ONCE(l->locked))
+				return;
+
+			cpu_relax();
+		}
+
+		WRITE_ONCE(pn->state, vcpu_halted);
+		if (!lp)
+			lp = pv_hash(lock, pn);
+		/*
+		 * lp must be set before setting _Q_SLOW_VAL
+		 *
+		 * [S] lp = lock                [RmW] l->locked = 0
+		 *     MB                             MB
+		 * [S] l->locked = _Q_SLOW_VAL  [L]   lp
+		 *
+		 * Matches the cmpxchg() in __pv_queue_spin_unlock().
+		 */
+		if (!slow_set &&
+		    !cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL)) {
+			/*
+			 * The lock is free and _Q_SLOW_VAL has never been
+			 * set. Need to clear the hash bucket before getting
+			 * the lock.
+			 */
+			WRITE_ONCE(*lp, NULL);
+			return;
+		} else if (slow_set && !READ_ONCE(l->locked))
+			return;
+		slow_set = true;
+
+		pv_wait(&l->locked, _Q_SLOW_VAL);
+	}
+	/*
+	 * Lock is unlocked now; the caller will acquire it without waiting.
+	 * As with pv_wait_node() we rely on the caller to do a load-acquire
+	 * for us.
+	 */
+}
+
+/*
+ * To be used instead of queue_spin_unlock() for paravirt locks. Wakes
+ * pv_wait_head() if appropriate.
+ */
+__visible void __pv_queue_spin_unlock(struct qspinlock *lock)
+{
+	struct __qspinlock *l = (void *)lock;
+	struct pv_node *node;
+
+	if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+		return;
+
+	/*
+	 * The queue head has been halted. Need to locate it and wake it up.
+	 */
+	node = pv_hash_find(lock);
+	smp_store_release(&l->locked, 0);
+
+	/*
+	 * At this point the memory pointed at by lock can be freed/reused,
+	 * however we can still use the PV node to kick the CPU.
+	 */
+	if (READ_ONCE(node->state) == vcpu_halted)
+		pv_kick(node->cpu);
+}
+PV_CALLEE_SAVE_REGS_THUNK(__pv_queue_spin_unlock);
-- 
1.7.1

