From: Waiman Long <waiman.long@hp.com>
To: Peter Zijlstra <peterz@infradead.org>
Cc: linux-arch@vger.kernel.org, riel@redhat.com, x86@kernel.org,
	kvm@vger.kernel.org, konrad.wilk@oracle.com, scott.norton@hp.com,
	raghavendra.kt@linux.vnet.ibm.com, paolo.bonzini@gmail.com,
	oleg@redhat.com, linux-kernel@vger.kernel.org, mingo@redhat.com,
	david.vrabel@citrix.com, hpa@zytor.com, luto@amacapital.net,
	xen-devel@lists.xenproject.org, tglx@linutronix.de,
	paulmck@linux.vnet.ibm.com, torvalds@linux-foundation.org,
	boris.ostrovsky@oracle.com,
	virtualization@lists.linux-foundation.org, doug.hatch@hp.com
Subject: Re: [PATCH 8/9] qspinlock: Generic paravirt support
Date: Wed, 18 Mar 2015 16:50:37 -0400
Message-ID: <5509E51D.7040909@hp.com>
In-Reply-To: <20150316133112.278511476@infradead.org>



On 03/16/2015 09:16 AM, Peter Zijlstra wrote:
> Implement simple paravirt support for the qspinlock.
>
> Provide a separate (second) version of the spin_lock_slowpath for
> paravirt along with a special unlock path.
>
> The second slowpath is generated by adding a few pv hooks to the
> normal slowpath, but where those will compile away for the native
> case, they expand into special wait/wake code for the pv version.
>
> The actual MCS queue can use extra storage in the mcs_nodes[] array to
> keep track of state and therefore uses directed wakeups.
>
> The head contender has no such storage available and reverts to the
> per-cpu lock entry similar to the current kvm code. We can do a single
> entry because any nesting will wake the vcpu and cause the lower loop
> to retry.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>   include/asm-generic/qspinlock.h     |    3
>   kernel/locking/qspinlock.c          |   69 +++++++++++++-
>   kernel/locking/qspinlock_paravirt.h |  177 ++++++++++++++++++++++++++++++++++++
>   3 files changed, 248 insertions(+), 1 deletion(-)
>
> --- a/include/asm-generic/qspinlock.h
> +++ b/include/asm-generic/qspinlock.h
> @@ -118,6 +118,9 @@ static __always_inline bool virt_queue_s
>   }
>   #endif
>
> +extern void __pv_queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> +extern void __pv_queue_spin_unlock(struct qspinlock *lock);
> +
>   /*
>    * Initializier
>    */
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -18,6 +18,9 @@
>    * Authors: Waiman Long <waiman.long@hp.com>
>    *          Peter Zijlstra <peterz@infradead.org>
>    */
> +
> +#ifndef _GEN_PV_LOCK_SLOWPATH
> +
>   #include <linux/smp.h>
>   #include <linux/bug.h>
>   #include <linux/cpumask.h>
> @@ -65,13 +68,21 @@
>
>   #include "mcs_spinlock.h"
>
> +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> +#define MAX_NODES	8
> +#else
> +#define MAX_NODES	4
> +#endif
> +
>   /*
>    * Per-CPU queue node structures; we can never have more than 4 nested
>    * contexts: task, softirq, hardirq, nmi.
>    *
>    * Exactly fits one 64-byte cacheline on a 64-bit architecture.
> + *
> + * PV doubles the storage and uses the second cacheline for PV state.
>    */
> -static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[4]);
> +static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
>
>   /*
>    * We must be able to distinguish between no-tail and the tail at 0:0,
> @@ -230,6 +241,32 @@ static __always_inline void set_locked(s
>   	WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
>   }
>
> +
> +/*
> + * Generate the native code for queue_spin_lock_slowpath(); provide NOPs for
> + * all the PV callbacks.
> + */
> +
> +static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
> +static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
> +static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
> +
> +static __always_inline void __pv_wait_head(struct qspinlock *lock) { }
> +
> +#define pv_enabled()		false
> +
> +#define pv_init_node		__pv_init_node
> +#define pv_wait_node		__pv_wait_node
> +#define pv_kick_node		__pv_kick_node
> +
> +#define pv_wait_head		__pv_wait_head
> +
> +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> +#define queue_spin_lock_slowpath	native_queue_spin_lock_slowpath
> +#endif
> +
> +#endif /* _GEN_PV_LOCK_SLOWPATH */
> +
>   /**
>    * queue_spin_lock_slowpath - acquire the queue spinlock
>    * @lock: Pointer to queue spinlock structure
> @@ -259,6 +296,9 @@ void queue_spin_lock_slowpath(struct qsp
>
>   	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
>
> +	if (pv_enabled())
> +		goto queue;
> +
>   	if (virt_queue_spin_lock(lock))
>   		return;
>
> @@ -335,6 +375,7 @@ void queue_spin_lock_slowpath(struct qsp
>   	node += idx;
>   	node->locked = 0;
>   	node->next = NULL;
> +	pv_init_node(node);
>
>   	/*
>   	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
> @@ -360,6 +401,7 @@ void queue_spin_lock_slowpath(struct qsp
>   		prev = decode_tail(old);
>   		WRITE_ONCE(prev->next, node);
>
> +		pv_wait_node(node);
>   		arch_mcs_spin_lock_contended(&node->locked);
>   	}
>
> @@ -374,6 +416,7 @@ void queue_spin_lock_slowpath(struct qsp
>   	 * sequentiality; this is because the set_locked() function below
>   	 * does not imply a full barrier.
>   	 */
> +	pv_wait_head(lock);
>   	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK)
>   		cpu_relax();
>
> @@ -406,6 +449,7 @@ void queue_spin_lock_slowpath(struct qsp
>   		cpu_relax();
>
>   	arch_mcs_spin_unlock_contended(&next->locked);
> +	pv_kick_node(next);
>
>   release:
>   	/*
> @@ -414,3 +458,26 @@ void queue_spin_lock_slowpath(struct qsp
>   	this_cpu_dec(mcs_nodes[0].count);
>   }
>   EXPORT_SYMBOL(queue_spin_lock_slowpath);
> +
> +/*
> + * Generate the paravirt code for queue_spin_lock_slowpath().
> + */
> +#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
> +#define _GEN_PV_LOCK_SLOWPATH
> +
> +#undef pv_enabled
> +#define pv_enabled()	true
> +
> +#undef pv_init_node
> +#undef pv_wait_node
> +#undef pv_kick_node
> +
> +#undef pv_wait_head
> +
> +#undef queue_spin_lock_slowpath
> +#define queue_spin_lock_slowpath	__pv_queue_spin_lock_slowpath
> +
> +#include "qspinlock_paravirt.h"
> +#include "qspinlock.c"
> +
> +#endif
> --- /dev/null
> +++ b/kernel/locking/qspinlock_paravirt.h
> @@ -0,0 +1,177 @@
> +#ifndef _GEN_PV_LOCK_SLOWPATH
> +#error "do not include this file"
> +#endif
> +
> +/*
> + * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
> + * of spinning them.
> + *
> + * This relies on the architecture to provide two paravirt hypercalls:
> + *
> + *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
> + *   pv_kick(cpu)             -- wakes a suspended vcpu
> + *
> + * Using these we implement __pv_queue_spin_lock_slowpath() and
> + * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and
> + * native_queue_spin_unlock().
> + */
> +
> +#define _Q_SLOW_VAL	(2U << _Q_LOCKED_OFFSET)
> +
> +enum vcpu_state {
> +	vcpu_running = 0,
> +	vcpu_halted,
> +};
> +
> +struct pv_node {
> +	struct mcs_spinlock	mcs;
> +	struct mcs_spinlock	__res[3];
> +
> +	int			cpu;
> +	u8			state;
> +};
> +
> +/*
> + * Initialize the PV part of the mcs_spinlock node.
> + */
> +static void pv_init_node(struct mcs_spinlock *node)
> +{
> +	struct pv_node *pn = (struct pv_node *)node;
> +
> +	BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));
> +
> +	pn->cpu = smp_processor_id();
> +	pn->state = vcpu_running;
> +}
> +
> +/*
> + * Wait for node->locked to become true, halt the vcpu after a short spin.
> + * pv_kick_node() is used to wake the vcpu again.
> + */
> +static void pv_wait_node(struct mcs_spinlock *node)
> +{
> +	struct pv_node *pn = (struct pv_node *)node;
> +	int loop;
> +
> +	for (;;) {
> +		for (loop = SPIN_THRESHOLD; loop; loop--) {
> +			if (READ_ONCE(node->locked))
> +				goto done;
> +
> +			cpu_relax();
> +		}
> +
> +		/*
> +		 * Order pn->state vs pn->locked thusly:
> +		 *
> +		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
> +		 *     MB			      MB
> +		 * [L] pn->locked		[RmW] pn->state = vcpu_running
> +		 *
> +		 * Matches the xchg() from pv_kick_node().
> +		 */
> +		(void)xchg(&pn->state, vcpu_halted);
> +
> +		if (READ_ONCE(node->locked))
> +			goto done;
> +
> +		pv_wait(&pn->state, vcpu_halted);
> +	}
> +done:
> +	pn->state = vcpu_running;
> +
> +	/*
> +	 * By now our node->locked should be 1 and our caller will not actually
> +	 * spin-wait for it. We do however rely on our caller to do a
> +	 * load-acquire for us.
> +	 */
> +}
> +
> +/*
> + * Called after setting next->locked = 1, used to wake those stuck in
> + * pv_wait_node().
> + */
> +static void pv_kick_node(struct mcs_spinlock *node)
> +{
> +	struct pv_node *pn = (struct pv_node *)node;
> +
> +	/*
> +	 * Note that because node->locked is already set, this actual mcs_spinlock
> +	 * entry could be re-used already.
> +	 *
> +	 * This should be fine however, kicking people for no reason is harmless.
> +	 *
> +	 * See the comment in pv_wait_node().
> +	 */
> +	if (xchg(&pn->state, vcpu_running) == vcpu_halted)
> +		pv_kick(pn->cpu);
> +}
> +
> +static DEFINE_PER_CPU(struct qspinlock *, __pv_lock_wait);
> +
> +/*
> + * Wait for l->locked to become clear; halt the vcpu after a short spin.
> + * __pv_queue_spin_unlock() will wake us.
> + */
> +static void pv_wait_head(struct qspinlock *lock)
> +{
> +	struct __qspinlock *l = (void *)lock;
> +	int loop;
> +
> +	for (;;) {
> +		for (loop = SPIN_THRESHOLD; loop; loop--) {
> +			if (!READ_ONCE(l->locked))
> +				goto done;
> +
> +			cpu_relax();
> +		}
> +
> +		this_cpu_write(__pv_lock_wait, lock);

We may run into the same problem that made us need 4 queue nodes per 
CPU. If an interrupt comes in just after this write and before the actual 
wait, and the handler goes through the same lock sequence, it will 
overwrite the __pv_lock_wait[] entry and the wakeup for the outer lock is 
lost. That is why the pvticket lock code did the write just before the 
actual wait, with interrupts disabled. We probably can't disable 
interrupts here, so we may need to move the write into the KVM and Xen 
code if we keep the current logic.
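
Just to illustrate the idea (a rough, untested sketch; the PV_WAIT_SLOTS 
constant and the pv_set_wait()/pv_clear_wait() helpers are names I made up 
here): if we keep the write in the generic code, __pv_lock_wait could 
become a small per-CPU array with one slot per nesting context, like 
mcs_nodes[], so a nested acquisition from an interrupt claims a different 
slot instead of overwriting ours:

#define PV_WAIT_SLOTS	4	/* task, softirq, hardirq, nmi */

static DEFINE_PER_CPU(struct qspinlock *, __pv_lock_wait[PV_WAIT_SLOTS]);

/*
 * Claim a free slot on this CPU before halting.  A nested context runs to
 * completion and releases its slot before we resume, so scanning for the
 * first free slot does not need interrupts disabled.
 */
static void pv_set_wait(struct qspinlock *lock)
{
	struct qspinlock **slot = this_cpu_ptr(&__pv_lock_wait[0]);
	int i;

	for (i = 0; i < PV_WAIT_SLOTS; i++) {
		if (!READ_ONCE(slot[i])) {
			WRITE_ONCE(slot[i], lock);
			return;
		}
	}
	BUG();	/* deeper nesting than we have slots for */
}

/* Release our slot once the lock is ours (the done: path above). */
static void pv_clear_wait(struct qspinlock *lock)
{
	struct qspinlock **slot = this_cpu_ptr(&__pv_lock_wait[0]);
	int i;

	for (i = 0; i < PV_WAIT_SLOTS; i++) {
		if (READ_ONCE(slot[i]) == lock) {
			WRITE_ONCE(slot[i], NULL);
			return;
		}
	}
}

The unlock side would then have to check every slot of every CPU, which 
makes the scan below even more expensive, so moving the write down into 
the KVM/Xen pv_wait() implementations may still be the cleaner option.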

> +		/*
> +		 * __pv_lock_wait must be set before setting _Q_SLOW_VAL
> +		 *
> +		 * [S] __pv_lock_wait = lock    [RmW] l = l->locked = 0
> +		 *     MB                             MB
> +		 * [S] l->locked = _Q_SLOW_VAL  [L]   __pv_lock_wait
> +		 *
> +		 * Matches the xchg() in pv_queue_spin_unlock().
> +		 */
> +		if (!cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL))
> +			goto done;
> +
> +		pv_wait(&l->locked, _Q_SLOW_VAL);
> +	}
> +done:
> +	this_cpu_write(__pv_lock_wait, NULL);
> +
> +	/*
> +	 * Lock is unlocked now; the caller will acquire it without waiting.
> +	 * As with pv_wait_node() we rely on the caller to do a load-acquire
> +	 * for us.
> +	 */
> +}
> +
> +/*
> + * To be used instead of queue_spin_unlock() for paravirt locks. Wakes
> + * pv_wait_head() if appropriate.
> + */
> +void __pv_queue_spin_unlock(struct qspinlock *lock)
> +{
> +	struct __qspinlock *l = (void *)lock;
> +	int cpu;
> +
> +	if (xchg(&l->locked, 0) != _Q_SLOW_VAL)
> +		return;
> +
> +	/*
> +	 * At this point the memory pointed at by lock can be freed/reused,
> +	 * however we can still use the pointer value to search in our cpu
> +	 * array.
> +	 *
> +	 * XXX: get rid of this loop
> +	 */
> +	for_each_possible_cpu(cpu) {
> +		if (per_cpu(__pv_lock_wait, cpu) == lock)
> +			pv_kick(cpu);
> +	}
> +}

I do want to get rid of this loop too. On average we have to scan about 
half of the available CPUs, so performance-wise it isn't that different 
from my original idea of following the list from tail to head. And what 
about your idea of propagating the current head down the linked list?
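
One direction that might get rid of the scan entirely (again only a rough 
sketch; pv_hash_insert()/pv_hash_remove(), the table size and the 
pv_hash_entry layout are all invented here): hash the lock pointer into a 
small lock-to-cpu table, so the unlocker does a single probe instead of 
walking every possible CPU:

#include <linux/hash.h>		/* for hash_ptr() */

#define PV_LOCK_HASH_BITS	8
#define PV_LOCK_HASH_SIZE	(1U << PV_LOCK_HASH_BITS)

struct pv_hash_entry {
	struct qspinlock	*lock;
	int			cpu;
};

static struct pv_hash_entry pv_lock_hash[PV_LOCK_HASH_SIZE];

/*
 * Called by the queue head before halting, in place of the
 * this_cpu_write(__pv_lock_wait, lock) above.  Open addressing; the
 * ordering of the ->cpu store vs the unlocker's read is glossed over.
 */
static void pv_hash_insert(struct qspinlock *lock, int cpu)
{
	u32 idx = hash_ptr(lock, PV_LOCK_HASH_BITS);

	for (;; idx = (idx + 1) & (PV_LOCK_HASH_SIZE - 1)) {
		if (!cmpxchg(&pv_lock_hash[idx].lock, NULL, lock)) {
			WRITE_ONCE(pv_lock_hash[idx].cpu, cpu);
			return;
		}
	}
}

/*
 * Called by __pv_queue_spin_unlock() once it has seen _Q_SLOW_VAL, so
 * (assuming the right barriers) the entry is present; returns the cpu
 * to kick.
 */
static int pv_hash_remove(struct qspinlock *lock)
{
	u32 idx = hash_ptr(lock, PV_LOCK_HASH_BITS);

	for (;; idx = (idx + 1) & (PV_LOCK_HASH_SIZE - 1)) {
		if (READ_ONCE(pv_lock_hash[idx].lock) == lock) {
			int cpu = READ_ONCE(pv_lock_hash[idx].cpu);

			WRITE_ONCE(pv_lock_hash[idx].lock, NULL);
			return cpu;
		}
	}
}

The cost is some static storage and a cmpxchg() on the wait path, and the 
collision probing needs a bound, but the common case for the unlocker 
becomes a single lookup.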

-Longman


