From: Waiman Long <Waiman.Long@hp.com>
To: Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, "H. Peter Anvin" <hpa@zytor.com>,
	Peter Zijlstra <peterz@infradead.org>
Cc: linux-arch@vger.kernel.org, Waiman Long <Waiman.Long@hp.com>,
	Rik van Riel <riel@redhat.com>,
	Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>,
	kvm@vger.kernel.org,
	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
	Daniel J Blueman <daniel@numascale.com>,
	x86@kernel.org, Paolo Bonzini <paolo.bonzini@gmail.com>,
	linux-kernel@vger.kernel.org,
	virtualization@lists.linux-foundation.org,
	Scott J Norton <scott.norton@hp.com>,
	David Vrabel <david.vrabel@citrix.com>,
	Oleg Nesterov <oleg@redhat.com>,
	xen-devel@lists.xenproject.org,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Douglas Hatch <doug.hatch@hp.com>
Subject: [PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
Date: Fri, 24 Apr 2015 14:56:37 -0400
Message-ID: <1429901803-29771-9-git-send-email-Waiman.Long@hp.com>
In-Reply-To: <1429901803-29771-1-git-send-email-Waiman.Long@hp.com>

Provide a separate (second) version of queue_spin_lock_slowpath() for
paravirt guests, along with a special unlock path.

The second slowpath is generated by adding a few PV hooks to the normal
slowpath. Those hooks compile away to nothing in the native case, but
expand into special wait/wake code for the PV version.
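
For readers unfamiliar with the trick, here is a minimal standalone sketch
(plain userspace C, not kernel code; all names are illustrative and it is
assumed to be saved as gen_slowpath.c) of how a single slowpath body can be
compiled twice, once with no-op hooks and once with PV hooks, by letting the
file include itself:

#ifndef _GEN_PV_SLOWPATH

#include <stdio.h>

/* Native build: the hook compiles away to nothing. */
static inline void __native_wait(void) { }

#define pv_wait		__native_wait
#define slowpath	native_slowpath

#endif /* _GEN_PV_SLOWPATH */

/* One body, emitted twice under different names. */
void slowpath(void)
{
	pv_wait();		/* NOP natively, wait/wake code for PV */
	printf("%s: spinning until the lock is free\n", __func__);
}

#ifndef _GEN_PV_SLOWPATH
#define _GEN_PV_SLOWPATH

/* PV build: the same hook name now expands into real wait code. */
static inline void __pv_wait(void) { printf("pv: halting this vcpu\n"); }

#undef  pv_wait
#undef  slowpath
#define pv_wait		__pv_wait
#define slowpath	pv_slowpath

#include "gen_slowpath.c"	/* re-emit slowpath() as pv_slowpath() */

int main(void)
{
	native_slowpath();
	pv_slowpath();
	return 0;
}
#endif

The patch below uses exactly this shape: qspinlock.c defines the no-op hooks,
then re-includes itself under _GEN_PV_LOCK_SLOWPATH after pulling in
qspinlock_paravirt.h.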

Waiters in the MCS queue use extra storage in the mcs_nodes[] array to
keep track of their vCPU state, which makes directed wakeups possible.
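
As a rough single-threaded model of what "directed wakeup" means here, the
sketch below (userspace C11 atomics; pv_wait()/pv_kick() are print-only
stand-ins for the hypercalls; all names are illustrative and the real code's
SPIN_THRESHOLD spinning phase is omitted) mirrors the handshake that
pv_wait_node()/pv_kick_node() implement on the per-node state:

#include <stdatomic.h>
#include <stdio.h>

enum vcpu_state { VCPU_RUNNING, VCPU_HALTED };

struct node {
	atomic_int locked;	/* has the lock been handed to us yet?  */
	atomic_int state;	/* VCPU_RUNNING or VCPU_HALTED          */
	int cpu;		/* which vcpu to kick                   */
};

/* Stand-ins for the pv_wait()/pv_kick() hypercalls described in the patch. */
static void pv_wait(atomic_int *ptr, int val)
{
	if (atomic_load(ptr) == val)
		printf("vcpu halts until kicked\n");
}

static void pv_kick(int cpu)
{
	printf("kick vcpu %d\n", cpu);
}

/* Queued waiter: announce "halted", re-check the handover, then block. */
static void waiter_side(struct node *n)
{
	atomic_exchange(&n->state, VCPU_HALTED);	/* full barrier */
	if (!atomic_load(&n->locked))
		pv_wait(&n->state, VCPU_HALTED);
	atomic_store(&n->state, VCPU_RUNNING);
}

/* Previous lock holder: hand the lock over, kick only if the waiter halted. */
static void releaser_side(struct node *n)
{
	atomic_store(&n->locked, 1);
	if (atomic_exchange(&n->state, VCPU_RUNNING) == VCPU_HALTED)
		pv_kick(n->cpu);			/* directed wakeup */
}

int main(void)
{
	struct node n = { .cpu = 3 };

	/* Hand-simulated interleaving: the waiter announces it will halt, */
	atomic_exchange(&n.state, VCPU_HALTED);
	/* the releaser hands over the lock and kicks exactly that vcpu,   */
	releaser_side(&n);
	/* and the kicked waiter finds node->locked set and carries on.    */
	waiter_side(&n);
	return 0;
}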

The head contender has no such storage directly visible to the unlocker,
so the unlocker looks it up in a hash table keyed by the lock address,
using open addressing with cacheline-sized buckets and linear probing.
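
The toy userspace model below gives a rough picture of that lookup. It is a
sketch only: illustrative names, a small flat array instead of the patch's
dynamically sized, cacheline-bucketed table, and no concurrency. What it
shows is the open-addressing insert (lock side) and remove (unlock side)
keyed by the lock address:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NR_ENTRIES	64	/* the real table is sized from num_possible_cpus() */

struct entry {
	void *lock;		/* key: lock address, NULL means the slot is free */
	int   node;		/* value: which waiter to kick at unlock time     */
};

static struct entry table[NR_ENTRIES];

static unsigned int hash_ptr(const void *p)
{
	/* Any reasonable pointer hash will do for the illustration. */
	return (unsigned int)(((uintptr_t)p >> 4) * 2654435761u) % NR_ENTRIES;
}

/* Lock side: publish "this lock's queue head is <node>". */
static void pv_hash_insert(void *lock, int node)
{
	unsigned int h = hash_ptr(lock);

	for (unsigned int i = 0; i < NR_ENTRIES; i++, h = (h + 1) % NR_ENTRIES) {
		if (!table[h].lock) {	/* open addressing: claim the first free slot */
			table[h].lock = lock;
			table[h].node = node;
			return;
		}
	}
	assert(0 && "never full: the real table is sized with plenty of slack");
}

/* Unlock side: find (and clear) the entry for this lock. */
static int pv_hash_remove(void *lock)
{
	unsigned int h = hash_ptr(lock);

	for (unsigned int i = 0; i < NR_ENTRIES; i++, h = (h + 1) % NR_ENTRIES) {
		if (table[h].lock == lock) {
			table[h].lock = NULL;
			return table[h].node;
		}
	}
	return -1;		/* lock was never hashed */
}

int main(void)
{
	int lock_a, lock_b;	/* stand-ins for qspinlock addresses */

	pv_hash_insert(&lock_a, 1);
	pv_hash_insert(&lock_b, 2);
	printf("unlocking lock_a kicks node %d\n", pv_hash_remove(&lock_a));
	printf("unlocking lock_b kicks node %d\n", pv_hash_remove(&lock_b));
	return 0;
}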

Signed-off-by: Waiman Long <Waiman.Long@hp.com>
---
 kernel/locking/qspinlock.c          |   68 +++++++-
 kernel/locking/qspinlock_paravirt.h |  324 +++++++++++++++++++++++++++++++++++
 2 files changed, 391 insertions(+), 1 deletions(-)
 create mode 100644 kernel/locking/qspinlock_paravirt.h

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index fc2e5ab..c009120 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -18,6 +18,9 @@
  * Authors: Waiman Long <waiman.long@hp.com>
  *          Peter Zijlstra <peterz@infradead.org>
  */
+
+#ifndef _GEN_PV_LOCK_SLOWPATH
+
 #include <linux/smp.h>
 #include <linux/bug.h>
 #include <linux/cpumask.h>
@@ -65,13 +68,21 @@
 
 #include "mcs_spinlock.h"
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define MAX_NODES	8
+#else
+#define MAX_NODES	4
+#endif
+
 /*
  * Per-CPU queue node structures; we can never have more than 4 nested
  * contexts: task, softirq, hardirq, nmi.
  *
  * Exactly fits one 64-byte cacheline on a 64-bit architecture.
+ *
+ * PV doubles the storage and uses the second cacheline for PV state.
  */
-static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[4]);
+static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
 
 /*
  * We must be able to distinguish between no-tail and the tail at 0:0,
@@ -220,6 +231,32 @@ static __always_inline void set_locked(struct qspinlock *lock)
 	WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
 }
 
+
+/*
+ * Generate the native code for queue_spin_lock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock,
+					   struct mcs_spinlock *node) { }
+
+#define pv_enabled()		false
+
+#define pv_init_node		__pv_init_node
+#define pv_wait_node		__pv_wait_node
+#define pv_kick_node		__pv_kick_node
+#define pv_wait_head		__pv_wait_head
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define queue_spin_lock_slowpath	native_queue_spin_lock_slowpath
+#endif
+
+#endif /* _GEN_PV_LOCK_SLOWPATH */
+
 /**
  * queue_spin_lock_slowpath - acquire the queue spinlock
  * @lock: Pointer to queue spinlock structure
@@ -249,6 +286,9 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
+	if (pv_enabled())
+		goto queue;
+
 	if (virt_queue_spin_lock(lock))
 		return;
 
@@ -325,6 +365,7 @@ queue:
 	node += idx;
 	node->locked = 0;
 	node->next = NULL;
+	pv_init_node(node);
 
 	/*
 	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
@@ -350,6 +391,7 @@ queue:
 		prev = decode_tail(old);
 		WRITE_ONCE(prev->next, node);
 
+		pv_wait_node(node);
 		arch_mcs_spin_lock_contended(&node->locked);
 	}
 
@@ -365,6 +407,7 @@ queue:
 	 * does not imply a full barrier.
 	 *
 	 */
+	pv_wait_head(lock, node);
 	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK)
 		cpu_relax();
 
@@ -397,6 +440,7 @@ queue:
 		cpu_relax();
 
 	arch_mcs_spin_unlock_contended(&next->locked);
+	pv_kick_node(next);
 
 release:
 	/*
@@ -405,3 +449,25 @@ release:
 	this_cpu_dec(mcs_nodes[0].count);
 }
 EXPORT_SYMBOL(queue_spin_lock_slowpath);
+
+/*
+ * Generate the paravirt code for queue_spin_lock_slowpath().
+ */
+#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#define _GEN_PV_LOCK_SLOWPATH
+
+#undef  pv_enabled
+#define pv_enabled()	true
+
+#undef pv_init_node
+#undef pv_wait_node
+#undef pv_kick_node
+#undef pv_wait_head
+
+#undef  queue_spin_lock_slowpath
+#define queue_spin_lock_slowpath	__pv_queue_spin_lock_slowpath
+
+#include "qspinlock_paravirt.h"
+#include "qspinlock.c"
+
+#endif
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
new file mode 100644
index 0000000..084e5c1
--- /dev/null
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -0,0 +1,324 @@
+#ifndef _GEN_PV_LOCK_SLOWPATH
+#error "do not include this file"
+#endif
+
+#include <linux/hash.h>
+#include <linux/bootmem.h>
+
+/*
+ * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
+ * of spinning them.
+ *
+ * This relies on the architecture to provide two paravirt hypercalls:
+ *
+ *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
+ *   pv_kick(cpu)             -- wakes a suspended vcpu
+ *
+ * Using these we implement __pv_queue_spin_lock_slowpath() and
+ * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and
+ * native_queue_spin_unlock().
+ */
+
+#define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)
+
+enum vcpu_state {
+	vcpu_running = 0,
+	vcpu_halted,
+};
+
+struct pv_node {
+	struct mcs_spinlock	mcs;
+	struct mcs_spinlock	__res[3];
+
+	int			cpu;
+	u8			state;
+};
+
+/*
+ * Lock and MCS node addresses hash table for fast lookup
+ *
+ * Hashing is done on a per-cacheline basis to minimize the need to access
+ * more than one cacheline.
+ *
+ * Dynamically allocate a hash table big enough to hold at least 4X the
+ * number of possible cpus in the system. Allocation is done on page
+ * granularity. So the minimum number of hash buckets should be at least
+ * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
+ *
+ * Since we should not be holding locks from NMI context (very rare indeed) the
+ * max load factor is 0.75, which is around the point where open addressing
+ * breaks down.
+ *
+ */
+#define PV_HE_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
+#define PV_HB_MIN	(PAGE_SIZE / sizeof(struct pv_hash_bucket))
+
+struct pv_hash_entry {
+	struct qspinlock *lock;
+	struct pv_node   *node;
+};
+
+struct pv_hash_bucket {
+	struct pv_hash_entry ent[PV_HE_PER_LINE];
+};
+
+static struct pv_hash_bucket *pv_lock_hash;
+static unsigned int pv_lock_hash_bits __read_mostly;
+
+/*
+ * Allocate memory for the PV qspinlock hash buckets
+ *
+ * This function should be called from the paravirt spinlock initialization
+ * routine.
+ */
+void __init __pv_init_lock_hash(void)
+{
+	int pv_hash_size = 4 * num_possible_cpus() / PV_HE_PER_LINE;
+
+	if (pv_hash_size < PV_HB_MIN)
+		pv_hash_size = PV_HB_MIN;
+	/*
+	 * Allocate space from bootmem which should be page-size aligned
+	 * and hence cacheline aligned.
+	 */
+	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
+					       sizeof(struct pv_hash_bucket),
+					       pv_hash_size, 0, HASH_EARLY,
+					       &pv_lock_hash_bits, NULL,
+					       pv_hash_size, pv_hash_size);
+}
+
+static inline struct qspinlock **
+pv_hash(struct qspinlock *lock, struct pv_node *node)
+{
+	unsigned long init_hash, hash = hash_ptr(lock, pv_lock_hash_bits);
+	struct pv_hash_entry *he, *end;
+
+	init_hash = hash;
+	for (;;) {
+		he = pv_lock_hash[hash].ent;
+		for (end = he + PV_HE_PER_LINE; he < end; he++) {
+			if (!cmpxchg(&he->lock, NULL, lock)) {
+				/*
+				 * We haven't set the _Q_SLOW_VAL yet. So
+				 * the order of writing doesn't matter.
+				 */
+				WRITE_ONCE(he->node, node);
+				goto done;
+			}
+		}
+		if (++hash >= (1 << pv_lock_hash_bits))
+			hash = 0;
+		BUG_ON(hash == init_hash);
+	}
+
+done:
+	return &he->lock;
+}
+
+static inline struct pv_node *pv_hash_find(struct qspinlock *lock)
+{
+	unsigned long init_hash, hash = hash_ptr(lock, pv_lock_hash_bits);
+	struct pv_hash_entry *he, *end;
+	struct pv_node *node = NULL;
+
+	init_hash = hash;
+	for (;;) {
+		he = pv_lock_hash[hash].ent;
+		for (end = he + PV_HE_PER_LINE; he < end; he++) {
+			struct qspinlock *l = READ_ONCE(he->lock);
+
+			if (l == lock) {
+				node = READ_ONCE(he->node);
+				goto done;
+			}
+		}
+
+		if (++hash >= (1 << pv_lock_hash_bits))
+			hash = 0;
+		BUG_ON(hash == init_hash);
+	}
+done:
+	/*
+	 * Clear the hash entry before returning the PV node address
+	 */
+	WRITE_ONCE(he->lock, NULL);
+	return node;
+}
+
+/*
+ * PV version of the unlock function to be used instead of
+ * queue_spin_unlock().
+ */
+__visible void __pv_queue_spin_unlock(struct qspinlock *lock)
+{
+	struct __qspinlock *l = (void *)lock;
+	struct pv_node *node;
+
+	if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+		return;
+
+	/*
+	 * The queue head has been halted. Need to locate it and wake it up.
+	 */
+	node = pv_hash_find(lock);
+	smp_store_release(&l->locked, 0);
+
+	/*
+	 * At this point the memory pointed at by lock can be freed/reused,
+	 * however we can still use the PV node to kick the CPU.
+	 */
+	if (READ_ONCE(node->state) == vcpu_halted)
+		pv_kick(node->cpu);
+}
+/*
+ * Include the architecture specific callee-save thunk of the
+ * __pv_queue_spin_unlock(). This thunk is put together with
+ * __pv_queue_spin_unlock() near the top of the file to make sure
+ * that the callee-save thunk and the real unlock function are close
+ * to each other sharing consecutive instruction cachelines.
+ */
+#include <asm/qspinlock_paravirt.h>
+
+/*
+ * Initialize the PV part of the mcs_spinlock node.
+ */
+static void pv_init_node(struct mcs_spinlock *node)
+{
+	struct pv_node *pn = (struct pv_node *)node;
+
+	BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));
+
+	pn->cpu = smp_processor_id();
+	pn->state = vcpu_running;
+}
+
+/*
+ * Wait for node->locked to become true, halt the vcpu after a short spin.
+ * pv_kick_node() is used to wake the vcpu again.
+ */
+static void pv_wait_node(struct mcs_spinlock *node)
+{
+	struct pv_node *pn = (struct pv_node *)node;
+	int loop;
+
+	for (;;) {
+		for (loop = SPIN_THRESHOLD; loop; loop--) {
+			if (READ_ONCE(node->locked))
+				return;
+			cpu_relax();
+		}
+
+		/*
+		 * Order pn->state vs pn->locked thusly:
+		 *
+		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
+		 *     MB			      MB
+		 * [L] pn->locked		[RmW] pn->state = vcpu_running
+		 *
+		 * Matches the xchg() from pv_kick_node().
+		 */
+		(void)xchg(&pn->state, vcpu_halted);
+
+		if (!READ_ONCE(node->locked))
+			pv_wait(&pn->state, vcpu_halted);
+
+		/*
+		 * Reset the vCPU state to avoid unnecessary CPU kicking
+		 */
+		WRITE_ONCE(pn->state, vcpu_running);
+
+		/*
+		 * If the locked flag is still not set after wakeup, it is a
+		 * spurious wakeup and the vCPU should wait again. However,
+		 * there is a pretty high overhead for CPU halting and kicking.
+		 * So it is better to spin for a while in the hope that the
+		 * MCS lock will be released soon.
+		 */
+	}
+	/*
+	 * By now our node->locked should be 1 and our caller will not actually
+	 * spin-wait for it. We do however rely on our caller to do a
+	 * load-acquire for us.
+	 */
+}
+
+/*
+ * Called after setting next->locked = 1, used to wake those stuck in
+ * pv_wait_node().
+ */
+static void pv_kick_node(struct mcs_spinlock *node)
+{
+	struct pv_node *pn = (struct pv_node *)node;
+
+	/*
+	 * Note that because node->locked is already set, this actual
+	 * mcs_spinlock entry could be re-used already.
+	 *
+	 * This should be fine however, kicking people for no reason is
+	 * harmless.
+	 *
+	 * See the comment in pv_wait_node().
+	 */
+	if (xchg(&pn->state, vcpu_running) == vcpu_halted)
+		pv_kick(pn->cpu);
+}
+
+/*
+ * Wait for l->locked to become clear; halt the vcpu after a short spin.
+ * __pv_queue_spin_unlock() will wake us.
+ */
+static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
+{
+	struct __qspinlock *l = (void *)lock;
+	struct qspinlock **lp;
+	struct pv_node *pn = (struct pv_node *)node;
+	int loop;
+
+	for (loop = SPIN_THRESHOLD; loop; loop--) {
+		if (!READ_ONCE(l->locked))
+			return;
+		cpu_relax();
+	}
+
+	WRITE_ONCE(pn->state, vcpu_halted);
+	lp = pv_hash(lock, pn);
+	/*
+	 * lp must be set before setting _Q_SLOW_VAL
+	 *
+	 * [S] lp = lock                [RmW] l = l->locked = 0
+	 *     MB                             MB
+	 * [S] l->locked = _Q_SLOW_VAL  [L]   lp
+	 *
+	 * Matches the cmpxchg() in __pv_queue_spin_unlock().
+	 */
+	if (!cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL)) {
+		/*
+		 * The lock is free and _Q_SLOW_VAL has never been
+		 * set. Need to clear the hash bucket before getting
+		 * the lock.
+		 */
+		WRITE_ONCE(*lp, NULL);
+		return;
+	}
+
+	/*
+	 * The unlocker should have freed the lock before kicking the CPU.
+	 * So if the lock is still not free, it is a spurious wakeup and
+	 * so the vCPU should wait again after spinning for a while.
+	 */
+	for (;;) {
+		pv_wait(&l->locked, _Q_SLOW_VAL);
+		for (loop = SPIN_THRESHOLD; loop; loop--) {
+			if (!READ_ONCE(l->locked))
+				return;
+			cpu_relax();
+		}
+	}
+
+	/*
+	 * Lock is unlocked now; the caller will acquire it without waiting.
+	 * As with pv_wait_node() we rely on the caller to do a load-acquire
+	 * for us.
+	 */
+}
-- 
1.7.1


Thread overview: 90+ messages
2015-04-24 18:56 [PATCH v16 00/14] qspinlock: a 4-byte queue spinlock with PV support Waiman Long
2015-04-24 18:56 ` [PATCH v16 01/14] qspinlock: A simple generic 4-byte queue spinlock Waiman Long
2015-05-08 13:25   ` [tip:locking/core] locking/qspinlock: Introduce a simple generic 4-byte queued spinlock tip-bot for Waiman Long
2015-04-24 18:56 ` [PATCH v16 02/14] qspinlock, x86: Enable x86-64 to use queue spinlock Waiman Long
2015-05-08 13:25   ` [tip:locking/core] locking/qspinlock, x86: Enable x86-64 to use queued spinlocks tip-bot for Waiman Long
2015-04-24 18:56 ` [PATCH v16 03/14] qspinlock: Add pending bit Waiman Long
2015-05-08 13:26   ` [tip:locking/core] locking/qspinlock: " tip-bot for Peter Zijlstra (Intel)
2015-04-24 18:56 ` [PATCH v16 04/14] qspinlock: Extract out code snippets for the next patch Waiman Long
2015-05-08 13:26   ` [tip:locking/core] locking/qspinlock: " tip-bot for Waiman Long
2015-04-24 18:56 ` [PATCH v16 05/14] qspinlock: Optimize for smaller NR_CPUS Waiman Long
2015-05-08 13:26   ` [tip:locking/core] locking/qspinlock: " tip-bot for Peter Zijlstra (Intel)
2015-04-24 18:56 ` [PATCH v16 06/14] qspinlock: Use a simple write to grab the lock Waiman Long
2015-05-08 13:27   ` [tip:locking/core] locking/qspinlock: " tip-bot for Waiman Long
2015-04-24 18:56 ` [PATCH v16 07/14] qspinlock: Revert to test-and-set on hypervisors Waiman Long
2015-05-08 13:27   ` [tip:locking/core] locking/qspinlock: " tip-bot for Peter Zijlstra (Intel)
2015-04-24 18:56 ` [PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock Waiman Long [this message]
2015-05-04 14:20   ` Peter Zijlstra
2015-05-04 17:15     ` Waiman Long
2015-05-08 13:27   ` [tip:locking/core] locking/pvqspinlock: " tip-bot for Waiman Long
2015-04-24 18:56 ` [PATCH v16 09/14] pvqspinlock, x86: Implement the paravirt qspinlock call patching Waiman Long
2015-05-08 13:27   ` [tip:locking/core] locking/pvqspinlock, " tip-bot for Peter Zijlstra (Intel)
2015-05-30  4:09     ` Sasha Levin
2015-05-31 18:29       ` Waiman Long
2015-04-24 18:56 ` [PATCH v16 10/14] pvqspinlock, x86: Enable PV qspinlock for KVM Waiman Long
2015-05-08 13:28   ` [tip:locking/core] locking/pvqspinlock, " tip-bot for Waiman Long
2015-04-24 18:56 ` [PATCH v16 11/14] pvqspinlock, x86: Enable PV qspinlock for Xen Waiman Long
2015-05-08 13:28   ` [tip:locking/core] locking/pvqspinlock, " tip-bot for David Vrabel
2015-04-24 18:56 ` [PATCH v16 12/14] pvqspinlock: Only kick CPU at unlock time Waiman Long
2015-04-24 18:56 ` [PATCH v16 13/14] pvqspinlock: Improve slowpath performance by avoiding cmpxchg Waiman Long
2015-04-29 18:11   ` Peter Zijlstra
2015-04-29 18:27     ` Linus Torvalds
2015-04-30 18:56       ` Waiman Long
2015-04-30 18:49     ` Waiman Long
2015-05-04 14:05       ` Peter Zijlstra
2015-05-04 17:18         ` Waiman Long
2015-04-24 18:56 ` [PATCH v16 14/14] pvqspinlock: Collect slowpath lock statistics Waiman Long
