xen-devel.lists.xenproject.org archive mirror
From: Oleksandr Tyshchenko <olekstysh@gmail.com>
To: xen-devel@lists.xenproject.org
Cc: "Oleksandr Tyshchenko" <oleksandr_tyshchenko@epam.com>,
	"Paul Durrant" <paul@xen.org>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"George Dunlap" <george.dunlap@citrix.com>,
	"Ian Jackson" <iwj@xenproject.org>,
	"Jan Beulich" <jbeulich@suse.com>,
	"Julien Grall" <julien@xen.org>,
	"Stefano Stabellini" <sstabellini@kernel.org>,
	"Wei Liu" <wl@xen.org>, "Roger Pau Monné" <roger.pau@citrix.com>,
	"Julien Grall" <julien.grall@arm.com>
Subject: [PATCH V3 08/23] xen/ioreq: Move x86's ioreq_server to struct domain
Date: Mon, 30 Nov 2020 12:31:23 +0200
Message-ID: <1606732298-22107-9-git-send-email-olekstysh@gmail.com>
In-Reply-To: <1606732298-22107-1-git-send-email-olekstysh@gmail.com>

From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

IOREQ is a common feature now and this struct will be used
on Arm as-is. Move it to the common struct domain. This also
significantly reduces the layering violation in the common code
(*arch.hvm* usage).

We don't move ioreq_gfn since it is not used in the common code
(the "legacy" mechanism is x86-specific).

Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
CC: Julien Grall <julien.grall@arm.com>

---
Please note, this is a split/cleanup/hardening of Julien's PoC:
"Add support for Guest IO forwarding to a device emulator"

Changes V1 -> V2:
   - new patch

Changes V2 -> V3:
   - remove the mention of "ioreq_gfn" from patch subject/description
   - update the patch to reflect that the "legacy interface" is x86-specific
   - drop hvm_params related changes in arch/x86/hvm/hvm.c
   - leave ioreq_gfn in hvm_domain
---
 xen/common/ioreq.c               | 60 ++++++++++++++++++++--------------------
 xen/include/asm-x86/hvm/domain.h |  8 ------
 xen/include/xen/sched.h          | 10 +++++++
 3 files changed, 40 insertions(+), 38 deletions(-)

diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index 3e80fc6..b7c2d5a 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -38,13 +38,13 @@ static void set_ioreq_server(struct domain *d, unsigned int id,
                              struct ioreq_server *s)
 {
     ASSERT(id < MAX_NR_IOREQ_SERVERS);
-    ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]);
+    ASSERT(!s || !d->ioreq_server.server[id]);
 
-    d->arch.hvm.ioreq_server.server[id] = s;
+    d->ioreq_server.server[id] = s;
 }
 
 #define GET_IOREQ_SERVER(d, id) \
-    (d)->arch.hvm.ioreq_server.server[id]
+    (d)->ioreq_server.server[id]
 
 static struct ioreq_server *get_ioreq_server(const struct domain *d,
                                              unsigned int id)
@@ -285,7 +285,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
     unsigned int id;
     bool found = false;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
@@ -296,7 +296,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
         }
     }
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return found;
 }
@@ -606,7 +606,7 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
         return -ENOMEM;
 
     domain_pause(d);
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
     {
@@ -634,13 +634,13 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
     if ( id )
         *id = i;
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
     domain_unpause(d);
 
     return 0;
 
  fail:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
     domain_unpause(d);
 
     xfree(s);
@@ -652,7 +652,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     struct ioreq_server *s;
     int rc;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -684,7 +684,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -697,7 +697,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     struct ioreq_server *s;
     int rc;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -731,7 +731,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -744,7 +744,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
 
     ASSERT(is_hvm_domain(d));
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -782,7 +782,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
     }
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -798,7 +798,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
     if ( start > end )
         return -EINVAL;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -834,7 +834,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
     rc = rangeset_add_range(r, start, end);
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -850,7 +850,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     if ( start > end )
         return -EINVAL;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -886,7 +886,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     rc = rangeset_remove_range(r, start, end);
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -911,7 +911,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
     if ( flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
         return -EINVAL;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -926,7 +926,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
     rc = arch_ioreq_server_map_mem_type(d, s, flags);
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -937,7 +937,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
     struct ioreq_server *s;
     int rc;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -961,7 +961,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
     return rc;
 }
 
@@ -971,7 +971,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
     unsigned int id;
     int rc;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
@@ -980,7 +980,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
             goto fail;
     }
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return 0;
 
@@ -995,7 +995,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
         hvm_ioreq_server_remove_vcpu(s, v);
     }
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -1005,12 +1005,12 @@ void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
     struct ioreq_server *s;
     unsigned int id;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
         hvm_ioreq_server_remove_vcpu(s, v);
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 }
 
 void hvm_destroy_all_ioreq_servers(struct domain *d)
@@ -1021,7 +1021,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
     if ( !arch_ioreq_server_destroy_all(d) )
         return;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     /* No need to domain_pause() as the domain is being torn down */
 
@@ -1039,7 +1039,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
         xfree(s);
     }
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 }
 
 struct ioreq_server *hvm_select_ioreq_server(struct domain *d,
@@ -1271,7 +1271,7 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
 
 void hvm_ioreq_init(struct domain *d)
 {
-    spin_lock_init(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_init(&d->ioreq_server.lock);
 
     arch_ioreq_domain_init(d);
 }
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 1c4ca47..b8be1ad 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -63,8 +63,6 @@ struct hvm_pi_ops {
     void (*vcpu_block)(struct vcpu *);
 };
 
-#define MAX_NR_IOREQ_SERVERS 8
-
 struct hvm_domain {
     /* Guest page range used for non-default ioreq servers */
     struct {
@@ -73,12 +71,6 @@ struct hvm_domain {
         unsigned long legacy_mask; /* indexed by HVM param number */
     } ioreq_gfn;
 
-    /* Lock protects all other values in the sub-struct and the default */
-    struct {
-        spinlock_t              lock;
-        struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
-    } ioreq_server;
-
     /* Cached CF8 for guest PCI config cycles */
     uint32_t                pci_cf8;
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index a345cc0..62cbcdb 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -316,6 +316,8 @@ struct sched_unit {
 
 struct evtchn_port_ops;
 
+#define MAX_NR_IOREQ_SERVERS 8
+
 struct domain
 {
     domid_t          domain_id;
@@ -523,6 +525,14 @@ struct domain
     /* Argo interdomain communication support */
     struct argo_domain *argo;
 #endif
+
+#ifdef CONFIG_IOREQ_SERVER
+    /* Lock protects all other values in the sub-struct and the default */
+    struct {
+        spinlock_t              lock;
+        struct ioreq_server     *server[MAX_NR_IOREQ_SERVERS];
+    } ioreq_server;
+#endif
 };
 
 static inline struct page_list_head *page_to_list(
-- 
2.7.4
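
For context, the relocated sub-struct is what later lets arch-neutral
callers query ioreq state at all; patch 17 of this series introduces a
real domain_has_ioreq_server() helper. A rough sketch of such a query
under the sub-struct's recursive lock (illustrative only; the actual
helper added in patch 17 may differ):

    /* Sketch: walk the now-common server array, as common/ioreq.c does. */
    static bool domain_has_ioreq_server(struct domain *d)
    {
        unsigned int id;
        bool found = false;

        spin_lock_recursive(&d->ioreq_server.lock);

        for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
        {
            if ( d->ioreq_server.server[id] )
            {
                found = true;
                break;
            }
        }

        spin_unlock_recursive(&d->ioreq_server.lock);

        return found;
    }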



Thread overview: 127+ messages
2020-11-30 10:31 Oleksandr Tyshchenko
2020-11-30 10:31 ` [PATCH V3 01/23] x86/ioreq: Prepare IOREQ feature for making it common Oleksandr Tyshchenko
2020-12-01 11:03   ` Alex Bennée
2020-12-01 18:53     ` Oleksandr
2020-12-01 19:36       ` Alex Bennée
2020-12-02  8:00       ` Jan Beulich
2020-12-02 11:19         ` Oleksandr
2020-12-07 11:13   ` Jan Beulich
2020-12-07 15:27     ` Oleksandr
2020-12-07 16:29       ` Jan Beulich
2020-12-07 17:21         ` Oleksandr
2020-11-30 10:31 ` [PATCH V3 02/23] x86/ioreq: Add IOREQ_STATUS_* #define-s and update code for moving Oleksandr Tyshchenko
2020-12-01 11:07   ` Alex Bennée
2020-12-07 11:19   ` Jan Beulich
2020-12-07 15:37     ` Oleksandr
2020-11-30 10:31 ` [PATCH V3 03/23] x86/ioreq: Provide out-of-line wrapper for the handle_mmio() Oleksandr Tyshchenko
2020-12-07 11:27   ` Jan Beulich
2020-12-07 15:39     ` Oleksandr
2020-11-30 10:31 ` [PATCH V3 04/23] xen/ioreq: Make x86's IOREQ feature common Oleksandr Tyshchenko
2020-12-07 11:41   ` Jan Beulich
2020-12-07 19:43     ` Oleksandr
2020-12-08  9:21       ` Jan Beulich
2020-12-08 13:56         ` Oleksandr
2020-12-08 15:02           ` Jan Beulich
2020-12-08 17:24             ` Oleksandr
2020-11-30 10:31 ` [PATCH V3 05/23] xen/ioreq: Make x86's hvm_ioreq_needs_completion() common Oleksandr Tyshchenko
2020-12-07 11:47   ` Jan Beulich
2020-11-30 10:31 ` [PATCH V3 06/23] xen/ioreq: Make x86's hvm_mmio_first(last)_byte() common Oleksandr Tyshchenko
2020-12-07 11:48   ` Jan Beulich
2020-11-30 10:31 ` [PATCH V3 07/23] xen/ioreq: Make x86's hvm_ioreq_(page/vcpu/server) structs common Oleksandr Tyshchenko
2020-12-07 11:54   ` Jan Beulich
2020-11-30 10:31 ` Oleksandr Tyshchenko [this message]
2020-12-07 12:04   ` [PATCH V3 08/23] xen/ioreq: Move x86's ioreq_server to struct domain Jan Beulich
2020-12-07 12:12     ` Paul Durrant
2020-12-07 19:52     ` Oleksandr
2020-11-30 10:31 ` [PATCH V3 09/23] xen/dm: Make x86's DM feature common Oleksandr Tyshchenko
2020-12-07 12:08   ` Jan Beulich
2020-12-07 20:23     ` Oleksandr
2020-12-08  9:30       ` Jan Beulich
2020-12-08 14:54         ` Oleksandr
2021-01-07 14:38           ` Oleksandr
2021-01-07 15:01             ` Jan Beulich
2021-01-07 16:49               ` Oleksandr
2021-01-12 22:23                 ` Oleksandr
2020-11-30 10:31 ` [PATCH V3 10/23] xen/mm: Make x86's XENMEM_resource_ioreq_server handling common Oleksandr Tyshchenko
2020-12-07 11:35   ` Jan Beulich
2020-12-07 12:11     ` Jan Beulich
2020-12-07 21:06       ` Oleksandr
2020-11-30 10:31 ` [PATCH V3 11/23] xen/ioreq: Move x86's io_completion/io_req fields to struct vcpu Oleksandr Tyshchenko
2020-12-07 12:32   ` Jan Beulich
2020-12-07 20:59     ` Oleksandr
2020-12-08  7:52       ` Paul Durrant
2020-12-08  9:35         ` Jan Beulich
2020-12-08 18:21         ` Oleksandr
2020-11-30 10:31 ` [PATCH V3 12/23] xen/ioreq: Remove "hvm" prefixes from involved function names Oleksandr Tyshchenko
2020-12-07 12:45   ` Jan Beulich
2020-12-07 20:28     ` Oleksandr
2020-11-30 10:31 ` [PATCH V3 13/23] xen/ioreq: Use guest_cmpxchg64() instead of cmpxchg() Oleksandr Tyshchenko
2020-12-09 21:32   ` Stefano Stabellini
2020-12-09 22:34     ` Oleksandr
2020-12-10  2:30       ` Stefano Stabellini
2020-11-30 10:31 ` [PATCH V3 14/23] arm/ioreq: Introduce arch specific bits for IOREQ/DM features Oleksandr Tyshchenko
2020-12-09 22:04   ` Stefano Stabellini
2020-12-09 22:49     ` Oleksandr
2020-12-10  2:30       ` Stefano Stabellini
2020-11-30 10:31 ` [PATCH V3 15/23] xen/arm: Stick around in leave_hypervisor_to_guest until I/O has completed Oleksandr Tyshchenko
2020-11-30 20:51   ` Volodymyr Babchuk
2020-12-01 12:46     ` Julien Grall
2020-12-09 23:18   ` Stefano Stabellini
2020-12-09 23:35     ` Stefano Stabellini
2020-12-09 23:47       ` Julien Grall
2020-12-10  2:30         ` Stefano Stabellini
2020-12-10 13:17           ` Julien Grall
2020-12-10 13:21           ` Oleksandr
2020-12-09 23:38     ` Julien Grall
2020-11-30 10:31 ` [PATCH V3 16/23] xen/mm: Handle properly reference in set_foreign_p2m_entry() on Arm Oleksandr Tyshchenko
2020-12-08 14:24   ` Jan Beulich
2020-12-08 16:41     ` Oleksandr
2020-12-09 23:49   ` Stefano Stabellini
2021-01-15  1:18   ` Stefano Stabellini
2020-11-30 10:31 ` [PATCH V3 17/23] xen/ioreq: Introduce domain_has_ioreq_server() Oleksandr Tyshchenko
2020-12-08 15:11   ` Jan Beulich
2020-12-08 15:33     ` Oleksandr
2020-12-08 16:56       ` Oleksandr
2020-12-08 19:43         ` Paul Durrant
2020-12-08 20:16           ` Oleksandr
2020-12-09  9:01             ` Paul Durrant
2020-12-09 18:58               ` Julien Grall
2020-12-09 21:05                 ` Oleksandr
2020-12-09 20:36               ` Oleksandr
2020-12-10  8:38                 ` Paul Durrant
2020-12-10 16:57                   ` Oleksandr
2020-11-30 10:31 ` [PATCH V3 18/23] xen/dm: Introduce xendevicemodel_set_irq_level DM op Oleksandr Tyshchenko
2020-12-10  2:21   ` Stefano Stabellini
2020-12-10 12:58     ` Oleksandr
2020-12-10 13:38     ` Julien Grall
2020-11-30 10:31 ` [PATCH V3 19/23] xen/arm: io: Abstract sign-extension Oleksandr Tyshchenko
2020-11-30 21:03   ` Volodymyr Babchuk
2020-11-30 23:27     ` Oleksandr
2020-12-01  7:55       ` Jan Beulich
2020-12-01 10:30         ` Julien Grall
2020-12-01 10:42           ` Oleksandr
2020-12-01 12:13             ` Julien Grall
2020-12-01 12:24               ` Oleksandr
2020-12-01 12:28                 ` Julien Grall
2020-12-01 10:49           ` Jan Beulich
2020-12-01 10:23       ` Julien Grall
2020-11-30 10:31 ` [PATCH V3 20/23] xen/ioreq: Make x86's send_invalidate_req() common Oleksandr Tyshchenko
2020-12-08 15:24   ` Jan Beulich
2020-12-08 16:49     ` Oleksandr
2020-12-09  8:21       ` Jan Beulich
2020-11-30 10:31 ` [PATCH V3 21/23] xen/arm: Add mapcache invalidation handling Oleksandr Tyshchenko
2020-12-10  2:30   ` Stefano Stabellini
2020-12-10 18:50     ` Julien Grall
2020-12-11  1:28       ` Stefano Stabellini
2020-12-11 11:21         ` Oleksandr
2020-12-11 19:07           ` Stefano Stabellini
2020-12-11 19:37             ` Julien Grall
2020-12-11 19:27         ` Julien Grall
2020-11-30 10:31 ` [PATCH V3 22/23] libxl: Introduce basic virtio-mmio support on Arm Oleksandr Tyshchenko
2020-11-30 10:31 ` [PATCH V3 23/23] [RFC] libxl: Add support for virtio-disk configuration Oleksandr Tyshchenko
2020-11-30 11:22 ` [PATCH V3 00/23] IOREQ feature (+ virtio-mmio) on Arm Oleksandr
2020-12-07 13:03   ` Wei Chen
2020-12-07 21:03     ` Oleksandr
2020-11-30 16:21 ` Alex Bennée
2020-11-30 22:22   ` [PATCH V3 00/23] IOREQ feature (+ virtio-mmio) on Arm Oleksandr
2020-12-29 15:32   ` Roger Pau Monné
