From: Don Slutz <dslutz@verizon.com>
To: xen-devel@lists.xen.org
Cc: Kevin Tian <kevin.tian@intel.com>, Keir Fraser <keir@xen.org>,
	Ian Campbell <ian.campbell@citrix.com>,
	Stefano Stabellini <stefano.stabellini@eu.citrix.com>,
	Jun Nakajima <jun.nakajima@intel.com>,
	Eddie Dong <eddie.dong@intel.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	Don Slutz <dslutz@verizon.com>, Tim Deegan <tim@xen.org>,
	George Dunlap <George.Dunlap@eu.citrix.com>,
	Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>,
	Jan Beulich <jbeulich@suse.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Subject: [PATCH for-4.5 v6 11/16] Add live migration of VMware's hyper-call RPC
Date: Sat, 20 Sep 2014 14:07:22 -0400
Message-ID: <1411236447-7435-12-git-send-email-dslutz@verizon.com>
In-Reply-To: <1411236447-7435-1-git-send-email-dslutz@verizon.com>

The VMware hyper-call state is included in live migration and
save/restore.  Because the maximum size of the VMware guestinfo data
is large, the data is compressed in vmport_save_domain_ctxt and
expanded in vmport_load_domain_ctxt.
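
Each guestinfo entry travels in the packed stream as a small
length-prefixed record.  As an illustration only -- a stand-alone
sketch with hypothetical names, not code from this patch -- the
normal (non-jumbo) encoding of one entry is:

    #include <stdint.h>
    #include <string.h>

    /*
     * Sketch: pack one guestinfo entry as
     * [key_len:1][val_len:1][key bytes][val bytes],
     * returning the number of bytes written.
     */
    static size_t pack_guestinfo(uint8_t *buf, uint8_t key_len,
                                 const void *key, uint8_t val_len,
                                 const void *val)
    {
        uint8_t *p = buf;

        *p++ = key_len;
        *p++ = val_len;
        memcpy(p, key, key_len);
        p += key_len;
        memcpy(p, val, val_len);
        p += val_len;
        return p - buf;
    }

The load side reverses these steps, which is why both ends assert
the sizes of the length fields they depend on.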

Signed-off-by: Don Slutz <dslutz@verizon.com>
---
v5:
      You ASSERTed that vg->key_len is 1 so you may not need the 'if'.
        That is an ASSERT(sizeof(...)), not just an ASSERT -- not changed.
      Use real errno, not -1.
        Fixed.
      No ASSERT in vmport_load_domain_ctxt
        Added.
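
To illustrate the ASSERT(sizeof(...)) point above: jumbo entries carry
a 2-byte val_len at an unaligned offset in the packed stream, so the
code copies it with memcpy() rather than dereferencing a pointer.  A
hedged sketch of the jumbo encoding (hypothetical helper, not part of
the patch):

    #include <stdint.h>
    #include <string.h>

    /*
     * Sketch: jumbo layout is [key_len:1][val_len:2][key][val];
     * val_len is copied byte-wise since the stream gives no
     * alignment guarantee.
     */
    static size_t pack_guestinfo_jumbo(uint8_t *buf, uint8_t key_len,
                                       const void *key, uint16_t val_len,
                                       const void *val)
    {
        uint8_t *p = buf;

        *p++ = key_len;
        memcpy(p, &val_len, sizeof(val_len));
        p += sizeof(val_len);
        memcpy(p, key, key_len);
        p += key_len;
        memcpy(p, val, val_len);
        p += val_len;
        return p - buf;
    }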

 xen/arch/x86/hvm/vmware/vmport_rpc.c   | 309 ++++++++++++++++++++++++++++++++-
 xen/include/public/arch-x86/hvm/save.h |  39 ++++-
 2 files changed, 346 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/hvm/vmware/vmport_rpc.c b/xen/arch/x86/hvm/vmware/vmport_rpc.c
index ed779b4..f795928 100644
--- a/xen/arch/x86/hvm/vmware/vmport_rpc.c
+++ b/xen/arch/x86/hvm/vmware/vmport_rpc.c
@@ -39,7 +39,8 @@
 #include <asm/hvm/vmport.h>
 #include <asm/hvm/trace.h>
 
-#include "public/arch-x86/hvm/vmporttype.h"
+#include <public/hvm/save.h>
+#include <public/arch-x86/hvm/save.h>
 
 #include "backdoor_def.h"
 #include "guest_msg_def.h"
@@ -1262,6 +1263,312 @@ void vmport_rpc_deinit(struct domain *d)
     xfree(vs);
 }
 
+/* save and restore functions */
+
+static int vmport_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_vmport_context *ctxt;
+    struct hvm_save_descriptor *desc;
+    struct hvm_domain *hd = &d->arch.hvm_domain;
+    struct vmport_state *vs = hd->vmport_data;
+    char *p;
+    unsigned int guestinfo_size = 0;
+    unsigned int used_guestinfo = 0;
+    unsigned int used_guestinfo_jumbo = 0;
+    unsigned int chans_size;
+    unsigned int i;
+
+    /* Customized handling, since our entry is of variable length */
+    desc = (struct hvm_save_descriptor *)&h->data[h->cur];
+    if ( _hvm_init_entry(h, HVM_SAVE_CODE(VMPORT), 0,
+                         HVM_SAVE_LENGTH(VMPORT)) )
+        return 1;
+    ctxt = (struct hvm_vmport_context *)&h->data[h->cur];
+
+    spin_lock(&hd->vmport_lock);
+
+    ctxt->version = VMPORT_SAVE_VERSION;
+    ctxt->ping_time = vs->ping_time;
+    ctxt->open_cookie = vs->open_cookie;
+    ctxt->used_guestinfo = vs->used_guestinfo;
+    ctxt->used_guestinfo_jumbo = vs->used_guestinfo_jumbo;
+
+    p = ctxt->u.packed.packed_data;
+
+    for ( i = 0; i < VMPORT_MAX_CHANS; i++ )
+    {
+        unsigned int j;
+        unsigned int buf_max;
+
+        ctxt->u.packed.chan_ctl[i].chan = vs->chans[i].ctl;
+        buf_max = vs->chans[i].ctl.send_len;
+        if ( buf_max > VMPORT_MAX_SEND_BUF * 4 )
+            buf_max = VMPORT_MAX_SEND_BUF * 4;
+        memcpy(p, vs->chans[i].send_buf, buf_max);
+        p += buf_max;
+        for ( j = 0; j < VMPORT_MAX_BKTS; j++ )
+        {
+            ctxt->u.packed.chan_ctl[i].recv[j] = vs->chans[i].recv_bkt[j].ctl;
+            buf_max = vs->chans[i].recv_bkt[j].ctl.recv_len;
+            if ( buf_max > VMPORT_MAX_RECV_BUF * 4 )
+                buf_max = VMPORT_MAX_RECV_BUF * 4;
+            memcpy(p, vs->chans[i].recv_bkt[j].recv_buf, buf_max);
+            p += buf_max;
+        }
+        ctxt->u.packed.chan_ctl[i].jumbo = vs->chans[i].jumbo_recv_bkt.ctl;
+        buf_max = vs->chans[i].jumbo_recv_bkt.ctl.recv_len;
+        if ( buf_max > VMPORT_MAX_RECV_JUMBO_BUF * 4 )
+            buf_max = VMPORT_MAX_RECV_JUMBO_BUF * 4;
+        memcpy(p, vs->chans[i].jumbo_recv_bkt.recv_buf, buf_max);
+        p += buf_max;
+    }
+
+    chans_size = p - ctxt->u.packed.packed_data;
+
+    for ( i = 0; i < ctxt->used_guestinfo; i++ )
+    {
+        vmport_guestinfo_t *vg = vs->guestinfo[i];
+
+        if ( vg && vg->key_len )
+        {
+            guestinfo_size += sizeof(vg->key_len) + sizeof(vg->val_len) +
+                vg->key_len + vg->val_len;
+            used_guestinfo++;
+            ASSERT(sizeof(vg->key_len) == 1);
+            *p++ = (char) vg->key_len;
+            ASSERT(sizeof(vg->val_len) == 1);
+            *p++ = (char) vg->val_len;
+            if ( vg->key_len )
+            {
+                memcpy(p, vg->key_data, vg->key_len);
+                p += vg->key_len;
+                if ( vg->val_len )
+                {
+                    memcpy(p, vg->val_data, vg->val_len);
+                    p += vg->val_len;
+                }
+            }
+        }
+    }
+    ctxt->used_guestinfo = used_guestinfo;
+
+    for ( i = 0; i < ctxt->used_guestinfo_jumbo; i++ )
+    {
+        vmport_guestinfo_jumbo_t *vgj =
+            vs->guestinfo_jumbo[i];
+        if ( vgj && vgj->key_len )
+        {
+            guestinfo_size += sizeof(vgj->key_len) + sizeof(vgj->val_len) +
+                vgj->key_len + vgj->val_len;
+            used_guestinfo_jumbo++;
+            ASSERT(sizeof(vgj->key_len) == 1);
+            *p++ = (char) vgj->key_len;
+            /* This is so migration does not fail */
+            ASSERT(sizeof(vgj->val_len) == 2);
+            memcpy(p, &vgj->val_len, sizeof(vgj->val_len));
+            p += sizeof(vgj->val_len);
+            if ( vgj->key_len )
+            {
+                memcpy(p, vgj->key_data, vgj->key_len);
+                p += vgj->key_len;
+                if ( vgj->val_len )
+                {
+                    memcpy(p, vgj->val_data, vgj->val_len);
+                    p += vgj->val_len;
+                }
+            }
+        }
+    }
+    ctxt->used_guestinfo_jumbo = used_guestinfo_jumbo;
+
+    ctxt->used_guestsize = guestinfo_size;
+
+    spin_unlock(&hd->vmport_lock);
+
+#ifndef NDEBUG
+    gdprintk(XENLOG_WARNING, "chans_size=%u guestinfo_size=%u, used=%ld\n",
+             chans_size, guestinfo_size,
+             p - ctxt->u.packed.packed_data);
+#endif
+    ASSERT(p - ctxt->u.packed.packed_data == chans_size + guestinfo_size);
+    ASSERT(desc->length >= p - (char *)ctxt);
+    desc->length = p - (char *)ctxt; /* Fixup length to be right */
+    h->cur += desc->length; /* Do _hvm_write_entry */
+    ASSERT(guestinfo_size < desc->length);
+
+    return 0;
+}
+
+static int vmport_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_vmport_context *ctxt;
+    struct hvm_save_descriptor *desc;
+    struct hvm_domain *hd = &d->arch.hvm_domain;
+    struct vmport_state *vs = hd->vmport_data;
+    unsigned int i;
+    uint8_t key_len;
+    uint16_t val_len;
+    char *p;
+    vmport_guestinfo_t *vg;
+    vmport_guestinfo_jumbo_t *vgj;
+    unsigned int loop_cnt;
+    unsigned int guestinfo_size;
+    unsigned int used_guestinfo;
+    unsigned int used_guestinfo_jumbo;
+
+    if ( !vs )
+        return -ENOMEM;
+
+    /* Customized checking, since our entry is of variable length */
+    desc = (struct hvm_save_descriptor *)&h->data[h->cur];
+    if ( sizeof(*desc) > h->size - h->cur )
+    {
+        printk(XENLOG_G_WARNING
+               "HVM%d restore: not enough data left to read descriptor"
+               "for type %lu\n", d->domain_id,
+               HVM_SAVE_CODE(VMPORT));
+        return -E2BIG;
+    }
+    if ( desc->length + sizeof(*desc) > h->size - h->cur )
+    {
+        printk(XENLOG_G_WARNING
+               "HVM%d restore: not enough data left to read %u bytes "
+               "for type %lu\n", d->domain_id, desc->length,
+               HVM_SAVE_CODE(VMPORT));
+        return -EFBIG;
+    }
+    if ( HVM_SAVE_CODE(VMPORT) != desc->typecode ||
+         (desc->length > HVM_SAVE_LENGTH(VMPORT)) )
+    {
+        printk(XENLOG_G_WARNING
+               "HVM%d restore mismatch: expected type %lu with max length %lu, "
+               "saw type %u length %u\n", d->domain_id, HVM_SAVE_CODE(VMPORT),
+               HVM_SAVE_LENGTH(VMPORT),
+               desc->typecode, desc->length);
+        return -ESRCH;
+    }
+    h->cur += sizeof(*desc);
+    /* Checking finished */
+
+    ctxt = (struct hvm_vmport_context *)&h->data[h->cur];
+    h->cur += desc->length;
+
+    if ( ctxt->version != VMPORT_SAVE_VERSION )
+        return -EINVAL;
+
+    spin_lock(&hd->vmport_lock);
+
+    vs->ping_time = ctxt->ping_time;
+    vs->open_cookie = ctxt->open_cookie;
+    vs->used_guestinfo = ctxt->used_guestinfo;
+    vs->used_guestinfo_jumbo = ctxt->used_guestinfo_jumbo;
+    guestinfo_size = ctxt->used_guestsize;
+    used_guestinfo = ctxt->used_guestinfo;
+    used_guestinfo_jumbo = ctxt->used_guestinfo_jumbo;
+
+    p = ctxt->u.packed.packed_data;
+
+    for ( i = 0; i < VMPORT_MAX_CHANS; i++ )
+    {
+        unsigned int j;
+
+        vs->chans[i].ctl = ctxt->u.packed.chan_ctl[i].chan;
+        memcpy(vs->chans[i].send_buf, p, vs->chans[i].ctl.send_len);
+        p += vs->chans[i].ctl.send_len;
+        for ( j = 0; j < VMPORT_MAX_BKTS; j++ )
+        {
+            vs->chans[i].recv_bkt[j].ctl = ctxt->u.packed.chan_ctl[i].recv[j];
+            memcpy(vs->chans[i].recv_bkt[j].recv_buf, p,
+                   vs->chans[i].recv_bkt[j].ctl.recv_len);
+            p += vs->chans[i].recv_bkt[j].ctl.recv_len;
+        }
+        vs->chans[i].jumbo_recv_bkt.ctl = ctxt->u.packed.chan_ctl[i].jumbo;
+        memcpy(vs->chans[i].jumbo_recv_bkt.recv_buf, p,
+               vs->chans[i].jumbo_recv_bkt.ctl.recv_len);
+        p += vs->chans[i].jumbo_recv_bkt.ctl.recv_len;
+    }
+
+
+    /* keep at least 10 total and 5 empty entries */
+    loop_cnt = (vs->used_guestinfo + 5) > 10 ?
+        (vs->used_guestinfo + 5) : 10;
+    for ( i = 0; i < loop_cnt; i++ )
+    {
+        if ( !vs->guestinfo[i] )
+        {
+            vs->guestinfo[i] = xzalloc(vmport_guestinfo_t);
+        }
+        if ( i < vs->used_guestinfo &&
+             guestinfo_size >= 2 )
+        {
+            ASSERT(sizeof(vg->key_len) == 1);
+            key_len = (uint8_t)*p++;
+            ASSERT(sizeof(vg->val_len) == 1);
+            val_len = (uint8_t)*p++;
+            guestinfo_size -= 2;
+            if ( guestinfo_size >= key_len + val_len )
+            {
+                vg = vs->guestinfo[i];
+                if ( key_len )
+                {
+                    vg->key_len = key_len;
+                    vg->val_len = val_len;
+                    memcpy(vg->key_data, p, key_len);
+                    p += key_len;
+                    memcpy(vg->val_data, p, val_len);
+                    p += val_len;
+                    guestinfo_size -= key_len + val_len;
+                }
+            }
+        }
+    }
+    vs->used_guestinfo = loop_cnt;
+
+    /* keep at least 2 total and 1 empty entries */
+    loop_cnt = (vs->used_guestinfo_jumbo + 1) > 2 ?
+        (vs->used_guestinfo_jumbo + 1) : 2;
+    for ( i = 0; i < loop_cnt; i++ )
+    {
+        if ( !vs->guestinfo_jumbo[i] )
+        {
+            vs->guestinfo_jumbo[i] = xzalloc(vmport_guestinfo_jumbo_t);
+        }
+        if ( i < vs->used_guestinfo_jumbo &&
+             guestinfo_size >= 3 )
+        {
+            ASSERT(sizeof(vgj->key_len) == 1);
+            key_len = (uint8_t)*p++;
+            /* This is so migration does not fail */
+            ASSERT(sizeof(vgj->val_len) == 2);
+            memcpy(&val_len, p, 2);
+            p += 2;
+            guestinfo_size -= 3;
+            if ( guestinfo_size >= key_len + val_len )
+            {
+                vgj = vs->guestinfo_jumbo[i];
+                if ( key_len )
+                {
+                    vgj->key_len = key_len;
+                    vgj->val_len = val_len;
+                    memcpy(vgj->key_data, p, key_len);
+                    p += key_len;
+                    memcpy(vgj->val_data, p, val_len);
+                    p += val_len;
+                    guestinfo_size -= key_len + val_len;
+                }
+            }
+        }
+    }
+    vs->used_guestinfo_jumbo = loop_cnt;
+
+    spin_unlock(&hd->vmport_lock);
+
+    return 0;
+}
+
+HVM_REGISTER_SAVE_RESTORE(VMPORT, vmport_save_domain_ctxt,
+                          vmport_load_domain_ctxt, 1, HVMSR_PER_DOM);
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 16d85a3..7cdcb8f 100644
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -26,6 +26,8 @@
 #ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__
 #define __XEN_PUBLIC_HVM_SAVE_X86_H__
 
+#include "vmporttype.h"
+
 /* 
  * Save/restore header: general info about the save file. 
  */
@@ -610,9 +612,44 @@ struct hvm_msr {
 
 #define CPU_MSR_CODE  20
 
+/*
+ * VMware context.
+ */
+struct hvm_vmport_context {
+    uint32_t version;
+    uint32_t used_guestsize;
+    uint64_t ping_time;
+    uint32_t open_cookie;
+    uint32_t used_guestinfo;
+    uint32_t used_guestinfo_jumbo;
+    union {
+        struct {
+            vmport_channel_t chans[VMPORT_MAX_CHANS];
+            vmport_guestinfo_t guestinfo[VMPORT_MAX_NUM_KEY];
+            vmport_guestinfo_jumbo_t guestinfo_jumbo[VMPORT_MAX_NUM_JUMBO_KEY];
+        } full;
+        struct {
+            struct {
+                vmport_channel_control_t chan;
+                vmport_bucket_control_t recv[VMPORT_MAX_BKTS];
+                vmport_bucket_control_t jumbo;
+            } chan_ctl[VMPORT_MAX_CHANS];
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+            char packed_data[];
+#elif defined(__GNUC__)
+            char packed_data[0];
+#else
+            char packed_data[1 /* Variable length */];
+#endif
+        } packed;
+    } u;
+};
+
+DECLARE_HVM_SAVE_TYPE(VMPORT, 21, struct hvm_vmport_context);
+
 /* 
  * Largest type-code in use
  */
-#define HVM_SAVE_CODE_MAX 20
+#define HVM_SAVE_CODE_MAX 21
 
 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
-- 
1.8.4
