From: Andrii Anisov <andrii.anisov@gmail.com>
To: xen-devel@lists.xen.org
Cc: "Stefano Stabellini" <sstabellini@kernel.org>,
	"Andrii Anisov" <andrii_anisov@epam.com>,
	"Konrad Rzeszutek Wilk" <konrad.wilk@oracle.com>,
	"George Dunlap" <George.Dunlap@eu.citrix.com>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Ian Jackson" <ian.jackson@eu.citrix.com>,
	"Tim Deegan" <tim@xen.org>, "Julien Grall" <julien.grall@arm.com>,
	"Jan Beulich" <jbeulich@suse.com>,
	xen-devel@lists.xenproject.org, "Wei Liu" <wei.liu2@citrix.com>,
	"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [Xen-devel] [PATCH v2 2/2] xen: implement VCPUOP_register_runstate_phys_memory_area
Date: Tue, 23 Apr 2019 11:10:26 +0300	[thread overview]
Message-ID: <1556007026-31057-3-git-send-email-andrii.anisov@gmail.com> (raw)
In-Reply-To: <1556007026-31057-1-git-send-email-andrii.anisov@gmail.com>

From: Andrii Anisov <andrii_anisov@epam.com>

VCPUOP_register_runstate_phys_memory_area is implemented by mapping the
guest-provided runstate area into the hypervisor: the underlying page is
looked up and mapped globally at registration time, so the runstate
update on the context-switch path becomes a plain memcpy into that
mapping instead of a copy_to_guest() through the guest's page tables.
The mapping is dropped on domain kill, on soft reset, and when a new
area is registered; update and (un)registration are serialized by a new
per-vCPU spin lock.

Signed-off-by: Andrii Anisov <andrii_anisov@epam.com>
---
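Note (not part of the patch itself): below is a minimal guest-side
sketch of how a kernel could use the new subop. The helper name is
hypothetical; HYPERVISOR_vcpu_op() and virt_to_phys() stand in for the
usual guest primitives, and the area must not cross a page boundary,
matching the check in map_runstate_area():

    static int register_runstate_phys(unsigned int cpu,
                                      struct vcpu_runstate_info *area)
    {
        struct vcpu_register_runstate_memory_area reg = { };

        /* The new subop interprets addr.p as a guest physical address. */
        reg.addr.p = virt_to_phys(area);

        return HYPERVISOR_vcpu_op(VCPUOP_register_runstate_phys_memory_area,
                                  cpu, &reg);
    }

Unlike VCPUOP_register_runstate_memory_area, the registered address is
not re-translated on every context switch, which is what allows the
memcpy-based update paths below.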
 xen/arch/arm/domain.c        |  62 +++++++++++++++++--------
 xen/arch/x86/domain.c        | 105 +++++++++++++++++++++++++++++++------------
 xen/common/domain.c          |  81 ++++++++++++++++++++++++++++++++-
 xen/include/asm-arm/domain.h |   2 +
 xen/include/xen/domain.h     |   2 +
 xen/include/xen/sched.h      |  10 +++++
 6 files changed, 213 insertions(+), 49 deletions(-)
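For illustration only: when VM_ASSIST(d, runstate_update_flag) is
enabled, every update path below brackets the copy with the
XEN_RUNSTATE_UPDATE bit in state_entry_time. A sketch of the matching
guest-side read loop (helper name hypothetical):

    static void runstate_snapshot(
        const volatile struct vcpu_runstate_info *area,
        struct vcpu_runstate_info *snap)
    {
        uint64_t t;

        do {
            t = area->state_entry_time;
            smp_rmb();  /* read the flag before the payload */
            *snap = *(const struct vcpu_runstate_info *)area;
            smp_rmb();  /* read the payload before the re-check */
        } while ( (t & XEN_RUNSTATE_UPDATE) ||
                  area->state_entry_time != t );
    }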

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 6dc633e..8e24e63 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -275,32 +275,55 @@ static void ctxt_switch_to(struct vcpu *n)
 }
 
 /* Update per-VCPU guest runstate shared memory area (if registered). */
-static void update_runstate_area(struct vcpu *v)
+void update_runstate_area(struct vcpu *v)
 {
-    void __user *guest_handle = NULL;
+    if ( !guest_handle_is_null(runstate_guest(v)) )
+    {
+        void __user *guest_handle = NULL;
+
+        if ( VM_ASSIST(v->domain, runstate_update_flag) )
+        {
+            guest_handle = &v->runstate_guest.p->state_entry_time + 1;
+            guest_handle--;
+            v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
+            __raw_copy_to_guest(guest_handle,
+                                (void *)(&v->runstate.state_entry_time + 1) - 1,
+                                1);
+            smp_wmb();
+        }
 
-    if ( guest_handle_is_null(runstate_guest(v)) )
-        return;
+        __copy_to_guest(runstate_guest(v), &v->runstate, 1);
 
-    if ( VM_ASSIST(v->domain, runstate_update_flag) )
-    {
-        guest_handle = &v->runstate_guest.p->state_entry_time + 1;
-        guest_handle--;
-        v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
-        __raw_copy_to_guest(guest_handle,
-                            (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
-        smp_wmb();
+        if ( guest_handle )
+        {
+            v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+            smp_wmb();
+            __raw_copy_to_guest(guest_handle,
+                                (void *)(&v->runstate.state_entry_time + 1) - 1,
+                                1);
+        }
     }
 
-    __copy_to_guest(runstate_guest(v), &v->runstate, 1);
-
-    if ( guest_handle )
+    spin_lock(&v->mapped_runstate_lock);
+    if ( v->mapped_runstate )
     {
-        v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
-        smp_wmb();
-        __raw_copy_to_guest(guest_handle,
-                            (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
+        if ( VM_ASSIST(v->domain, runstate_update_flag) )
+        {
+            v->mapped_runstate->state_entry_time |= XEN_RUNSTATE_UPDATE;
+            smp_wmb();
+            v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
+        }
+
+        memcpy(v->mapped_runstate, &v->runstate, sizeof(v->runstate));
+
+        if ( VM_ASSIST(v->domain, runstate_update_flag) )
+        {
+            v->mapped_runstate->state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+            smp_wmb();
+            v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+        }
     }
+    spin_unlock(&v->mapped_runstate_lock);
 }
 
 static void schedule_tail(struct vcpu *prev)
@@ -998,6 +1021,7 @@ long do_arm_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) a
     {
         case VCPUOP_register_vcpu_info:
         case VCPUOP_register_runstate_memory_area:
+        case VCPUOP_register_runstate_phys_memory_area:
             return do_vcpu_op(cmd, vcpuid, arg);
         default:
             return -EINVAL;
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 9eaa978..46c2219 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1558,51 +1558,98 @@ void paravirt_ctxt_switch_to(struct vcpu *v)
         wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
 }
 
-/* Update per-VCPU guest runstate shared memory area (if registered). */
-bool update_runstate_area(struct vcpu *v)
+static void update_mapped_runstate_area_native(struct vcpu *v)
 {
-    bool rc;
-    struct guest_memory_policy policy = { .nested_guest_mode = false };
-    void __user *guest_handle = NULL;
-
-    if ( guest_handle_is_null(runstate_guest(v)) )
-        return true;
-
-    update_guest_memory_policy(v, &policy);
-
     if ( VM_ASSIST(v->domain, runstate_update_flag) )
     {
-        guest_handle = has_32bit_shinfo(v->domain)
-            ? &v->runstate_guest.compat.p->state_entry_time + 1
-            : &v->runstate_guest.native.p->state_entry_time + 1;
-        guest_handle--;
         v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
-        __raw_copy_to_guest(guest_handle,
-                            (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
+        v->mapped_runstate.native->state_entry_time |= XEN_RUNSTATE_UPDATE;
         smp_wmb();
     }
 
-    if ( has_32bit_shinfo(v->domain) )
+    memcpy(v->mapped_runstate.native, &v->runstate, sizeof(v->runstate));
+
+    if ( VM_ASSIST(v->domain, runstate_update_flag) )
     {
-        struct compat_vcpu_runstate_info info;
+        v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+        v->mapped_runstate.native->state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+        smp_wmb();
+    }
+}
 
-        XLAT_vcpu_runstate_info(&info, &v->runstate);
-        __copy_to_guest(v->runstate_guest.compat, &info, 1);
-        rc = true;
+static void update_mapped_runstate_area_compat(struct vcpu *v)
+{
+    if ( VM_ASSIST(v->domain, runstate_update_flag) )
+    {
+        v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
+        v->mapped_runstate.compat->state_entry_time |= XEN_RUNSTATE_UPDATE;
+        smp_wmb();
     }
-    else
-        rc = __copy_to_guest(runstate_guest(v), &v->runstate, 1) !=
-             sizeof(v->runstate);
 
-    if ( guest_handle )
+    XLAT_vcpu_runstate_info(v->mapped_runstate.compat, &v->runstate);
+
+    if ( VM_ASSIST(v->domain, runstate_update_flag) )
     {
         v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+        v->mapped_runstate.compat->state_entry_time &= ~XEN_RUNSTATE_UPDATE;
         smp_wmb();
-        __raw_copy_to_guest(guest_handle,
-                            (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
     }
+}
 
-    update_guest_memory_policy(v, &policy);
+/* Update per-VCPU guest runstate shared memory area (if registered). */
+bool update_runstate_area(struct vcpu *v)
+{
+    bool rc = true;
+
+    if ( !guest_handle_is_null(runstate_guest(v)) )
+    {
+        struct guest_memory_policy policy = { .nested_guest_mode = false };
+        void __user *guest_handle = NULL;
+
+        update_guest_memory_policy(v, &policy);
+        if ( VM_ASSIST(v->domain, runstate_update_flag) )
+        {
+            guest_handle = has_32bit_shinfo(v->domain)
+                ? &v->runstate_guest.compat.p->state_entry_time + 1
+                : &v->runstate_guest.native.p->state_entry_time + 1;
+            guest_handle--;
+            v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
+            __raw_copy_to_guest(guest_handle,
+                                (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
+            smp_wmb();
+        }
+
+        if ( has_32bit_shinfo(v->domain) )
+        {
+            struct compat_vcpu_runstate_info info;
+
+            XLAT_vcpu_runstate_info(&info, &v->runstate);
+            __copy_to_guest(v->runstate_guest.compat, &info, 1);
+            rc = true;
+        }
+        else
+            rc = __copy_to_guest(runstate_guest(v), &v->runstate, 1) !=
+                 sizeof(v->runstate);
+
+        if ( guest_handle )
+        {
+            v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+            smp_wmb();
+            __raw_copy_to_guest(guest_handle,
+                                (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
+        }
+        update_guest_memory_policy(v, &policy);
+    }
+
+    spin_lock(&v->mapped_runstate_lock);
+    if ( mapped_runstate(v) )
+    {
+        if ( has_32bit_shinfo(v->domain) )
+            update_mapped_runstate_area_compat(v);
+        else
+            update_mapped_runstate_area_native(v);
+    }
+    spin_unlock(&v->mapped_runstate_lock);
 
     return rc;
 }
diff --git a/xen/common/domain.c b/xen/common/domain.c
index ae22049..6df76c6 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -149,6 +149,7 @@ struct vcpu *vcpu_create(
     v->dirty_cpu = VCPU_CPU_CLEAN;
 
     spin_lock_init(&v->virq_lock);
+    spin_lock_init(&v->mapped_runstate_lock);
 
     tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);
 
@@ -699,6 +700,70 @@ int rcu_lock_live_remote_domain_by_id(domid_t dom, struct domain **d)
     return 0;
 }
 
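+/* Caller must hold v->mapped_runstate_lock. */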
+static void _unmap_runstate_area(struct vcpu *v)
+{
+    mfn_t mfn;
+
+    if ( !mapped_runstate(v) )
+        return;
+
+    mfn = domain_page_map_to_mfn(mapped_runstate(v));
+
+    unmap_domain_page_global((void *)
+                             ((unsigned long)mapped_runstate(v) &
+                              PAGE_MASK));
+
+    mapped_runstate(v) = NULL;
+    put_page_and_type(mfn_to_page(mfn));
+}
+
+static int map_runstate_area(struct vcpu *v,
+                             struct vcpu_register_runstate_memory_area *area)
+{
+    unsigned long offset = area->addr.p & ~PAGE_MASK;
+    gfn_t gfn = _gfn(paddr_to_pfn(area->addr.p));
+    struct domain *d = v->domain;
+    void *mapping;
+    struct page_info *page;
+    size_t size = sizeof(struct vcpu_runstate_info);
+
+    if ( offset > (PAGE_SIZE - size) )
+        return -EINVAL;
+
+    page = get_page_from_gfn(d, gfn_x(gfn), NULL, P2M_ALLOC);
+    if ( !page )
+        return -EINVAL;
+
+    if ( !get_page_type(page, PGT_writable_page) )
+    {
+        put_page(page);
+        return -EINVAL;
+    }
+
+    mapping = __map_domain_page_global(page);
+
+    if ( mapping == NULL )
+    {
+        put_page_and_type(page);
+        return -ENOMEM;
+    }
+
+    spin_lock(&v->mapped_runstate_lock);
+    _unmap_runstate_area(v);
+    mapped_runstate(v) = mapping + offset;
+    spin_unlock(&v->mapped_runstate_lock);
+
+    return 0;
+}
+
+static void unmap_runstate_area(struct vcpu *v)
+{
+    spin_lock(&v->mapped_runstate_lock);
+    _unmap_runstate_area(v);
+    spin_unlock(&v->mapped_runstate_lock);
+}
+
 int domain_kill(struct domain *d)
 {
     int rc = 0;
@@ -737,7 +802,11 @@ int domain_kill(struct domain *d)
         if ( cpupool_move_domain(d, cpupool0) )
             return -ERESTART;
         for_each_vcpu ( d, v )
+        {
+            set_xen_guest_handle(runstate_guest(v), NULL);
+            unmap_runstate_area(v);
             unmap_vcpu_info(v);
+        }
         d->is_dying = DOMDYING_dead;
         /* Mem event cleanup has to go here because the rings 
          * have to be put before we call put_domain. */
@@ -1192,6 +1261,7 @@ int domain_soft_reset(struct domain *d)
     for_each_vcpu ( d, v )
     {
         set_xen_guest_handle(runstate_guest(v), NULL);
+        unmap_runstate_area(v);
         unmap_vcpu_info(v);
     }
 
@@ -1536,8 +1606,17 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
     }
 
     case VCPUOP_register_runstate_phys_memory_area:
-        rc = -EOPNOTSUPP;
+    {
+        struct vcpu_register_runstate_memory_area area;
+
+        rc = -EFAULT;
+        if ( copy_from_guest(&area, arg, 1) )
+            break;
+
+        rc = map_runstate_area(v, &area);
+
         break;
+    }
 
 #ifdef VCPU_TRAP_NMI
     case VCPUOP_send_nmi:
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 312fec8..3fb6ea2 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -217,6 +217,8 @@ void vcpu_show_execution_state(struct vcpu *);
 void vcpu_show_registers(const struct vcpu *);
 void vcpu_switch_to_aarch64_mode(struct vcpu *);
 
+void update_runstate_area(struct vcpu *);
+
 /*
  * Due to the restriction of GICv3, the number of vCPUs in AFF0 is
  * limited to 16, thus only the first 4 bits of AFF0 are legal. We will
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index d1bfc82..ecddcfe 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -118,4 +118,6 @@ struct vnuma_info {
 
 void vnuma_destroy(struct vnuma_info *vnuma);
 
+struct vcpu_register_runstate_memory_area;
+
 #endif /* __XEN_DOMAIN_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 748bb0f..2afe31c 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -163,15 +163,25 @@ struct vcpu
     void            *sched_priv;    /* scheduler-specific data */
 
     struct vcpu_runstate_info runstate;
+
+    spinlock_t      mapped_runstate_lock;
+
 #ifndef CONFIG_COMPAT
 # define runstate_guest(v) ((v)->runstate_guest)
+# define mapped_runstate(v) ((v)->mapped_runstate)
     XEN_GUEST_HANDLE(vcpu_runstate_info_t) runstate_guest; /* guest address */
+    vcpu_runstate_info_t *mapped_runstate;
 #else
 # define runstate_guest(v) ((v)->runstate_guest.native)
+# define mapped_runstate(v) ((v)->mapped_runstate.native)
     union {
         XEN_GUEST_HANDLE(vcpu_runstate_info_t) native;
         XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t) compat;
     } runstate_guest; /* guest address */
+    union {
+        vcpu_runstate_info_t *native;
+        vcpu_runstate_info_compat_t *compat;
+    } mapped_runstate; /* Xen-side mapping of the runstate area */
 #endif
 
     /* last time when vCPU is scheduled out */
-- 
2.7.4

