From: Oleksandr Tyshchenko <olekstysh@gmail.com>
To: xen-devel@lists.xenproject.org
Cc: "Oleksandr Tyshchenko" <oleksandr_tyshchenko@epam.com>,
	"Paul Durrant" <paul@xen.org>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"George Dunlap" <george.dunlap@citrix.com>,
	"Ian Jackson" <iwj@xenproject.org>,
	"Jan Beulich" <jbeulich@suse.com>,
	"Julien Grall" <julien@xen.org>,
	"Stefano Stabellini" <sstabellini@kernel.org>,
	"Wei Liu" <wl@xen.org>, "Roger Pau Monné" <roger.pau@citrix.com>,
	"Julien Grall" <julien.grall@arm.com>
Subject: [PATCH V4 08/24] xen/ioreq: Move x86's ioreq_server to struct domain
Date: Tue, 12 Jan 2021 23:52:16 +0200
Message-ID: <1610488352-18494-9-git-send-email-olekstysh@gmail.com>
In-Reply-To: <1610488352-18494-1-git-send-email-olekstysh@gmail.com>

From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

IOREQ is a common feature now and this struct will be used
on Arm as-is. Move it to the common struct domain. This also
significantly reduces the layering violation in the common code
(*arch.hvm* usage).

We don't move ioreq_gfn since it is not used in the common code
(the "legacy" mechanism is x86-specific).

Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
CC: Julien Grall <julien.grall@arm.com>
[On Arm only]
Tested-by: Wei Chen <Wei.Chen@arm.com>

---
Please note, this is a split/cleanup/hardening of Julien's PoC:
"Add support for Guest IO forwarding to a device emulator"

Changes V1 -> V2:
   - new patch

Changes V2 -> V3:
   - remove the mention of "ioreq_gfn" from patch subject/description
   - update the patch to reflect that the "legacy interface" is x86-specific
   - drop hvm_params related changes in arch/x86/hvm/hvm.c
   - leave ioreq_gfn in hvm_domain

Changes V3 -> V4:
   - rebase
   - drop the stale part of the comment above struct ioreq_server
   - add Jan's A-b
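
For illustration, a minimal sketch of the access pattern the move enables in
common code (the helper below is hypothetical and not part of this patch; it
assumes the existing FOR_EACH_IOREQ_SERVER() macro and the "enabled" field of
struct ioreq_server):

    /* Hypothetical helper -- mirrors the locking pattern in xen/common/ioreq.c */
    static unsigned int count_enabled_ioreq_servers(struct domain *d)
    {
        struct ioreq_server *s;
        unsigned int id, count = 0;

        spin_lock_recursive(&d->ioreq_server.lock);

        FOR_EACH_IOREQ_SERVER(d, id, s)
            if ( s->enabled )
                count++;

        spin_unlock_recursive(&d->ioreq_server.lock);

        return count;
    }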
---
 xen/common/ioreq.c               | 60 ++++++++++++++++++++--------------------
 xen/include/asm-x86/hvm/domain.h |  8 ------
 xen/include/xen/sched.h          | 10 +++++++
 3 files changed, 40 insertions(+), 38 deletions(-)

diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index 3f631ec..a319c88 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -38,13 +38,13 @@ static void set_ioreq_server(struct domain *d, unsigned int id,
                              struct ioreq_server *s)
 {
     ASSERT(id < MAX_NR_IOREQ_SERVERS);
-    ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]);
+    ASSERT(!s || !d->ioreq_server.server[id]);
 
-    d->arch.hvm.ioreq_server.server[id] = s;
+    d->ioreq_server.server[id] = s;
 }
 
 #define GET_IOREQ_SERVER(d, id) \
-    (d)->arch.hvm.ioreq_server.server[id]
+    (d)->ioreq_server.server[id]
 
 static struct ioreq_server *get_ioreq_server(const struct domain *d,
                                              unsigned int id)
@@ -285,7 +285,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
     unsigned int id;
     bool found = false;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
@@ -296,7 +296,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
         }
     }
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return found;
 }
@@ -606,7 +606,7 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
         return -ENOMEM;
 
     domain_pause(d);
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
     {
@@ -634,13 +634,13 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
     if ( id )
         *id = i;
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
     domain_unpause(d);
 
     return 0;
 
  fail:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
     domain_unpause(d);
 
     xfree(s);
@@ -652,7 +652,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     struct ioreq_server *s;
     int rc;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -684,7 +684,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -697,7 +697,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     struct ioreq_server *s;
     int rc;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -731,7 +731,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -744,7 +744,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
 
     ASSERT(is_hvm_domain(d));
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -782,7 +782,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
     }
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -798,7 +798,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
     if ( start > end )
         return -EINVAL;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -834,7 +834,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
     rc = rangeset_add_range(r, start, end);
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -850,7 +850,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     if ( start > end )
         return -EINVAL;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -886,7 +886,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     rc = rangeset_remove_range(r, start, end);
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -911,7 +911,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
     if ( flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
         return -EINVAL;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -926,7 +926,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
     rc = arch_ioreq_server_map_mem_type(d, s, flags);
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     if ( rc == 0 )
         arch_ioreq_server_map_mem_type_completed(d, s, flags);
@@ -940,7 +940,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
     struct ioreq_server *s;
     int rc;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -964,7 +964,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
     return rc;
 }
 
@@ -974,7 +974,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
     unsigned int id;
     int rc;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
@@ -983,7 +983,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
             goto fail;
     }
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return 0;
 
@@ -998,7 +998,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
         hvm_ioreq_server_remove_vcpu(s, v);
     }
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -1008,12 +1008,12 @@ void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
     struct ioreq_server *s;
     unsigned int id;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
         hvm_ioreq_server_remove_vcpu(s, v);
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 }
 
 void hvm_destroy_all_ioreq_servers(struct domain *d)
@@ -1024,7 +1024,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
     if ( !arch_ioreq_server_destroy_all(d) )
         return;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     /* No need to domain_pause() as the domain is being torn down */
 
@@ -1042,7 +1042,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
         xfree(s);
     }
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 }
 
 struct ioreq_server *hvm_select_ioreq_server(struct domain *d,
@@ -1274,7 +1274,7 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
 
 void hvm_ioreq_init(struct domain *d)
 {
-    spin_lock_init(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_init(&d->ioreq_server.lock);
 
     arch_ioreq_domain_init(d);
 }
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 1c4ca47..b8be1ad 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -63,8 +63,6 @@ struct hvm_pi_ops {
     void (*vcpu_block)(struct vcpu *);
 };
 
-#define MAX_NR_IOREQ_SERVERS 8
-
 struct hvm_domain {
     /* Guest page range used for non-default ioreq servers */
     struct {
@@ -73,12 +71,6 @@ struct hvm_domain {
         unsigned long legacy_mask; /* indexed by HVM param number */
     } ioreq_gfn;
 
-    /* Lock protects all other values in the sub-struct and the default */
-    struct {
-        spinlock_t              lock;
-        struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
-    } ioreq_server;
-
     /* Cached CF8 for guest PCI config cycles */
     uint32_t                pci_cf8;
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 3e46384..ad0d761 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -318,6 +318,8 @@ struct sched_unit {
 
 struct evtchn_port_ops;
 
+#define MAX_NR_IOREQ_SERVERS 8
+
 struct domain
 {
     domid_t          domain_id;
@@ -533,6 +535,14 @@ struct domain
     struct {
         unsigned int val;
     } teardown;
+
+#ifdef CONFIG_IOREQ_SERVER
+    /* Lock protects all other values in the sub-struct */
+    struct {
+        spinlock_t              lock;
+        struct ioreq_server     *server[MAX_NR_IOREQ_SERVERS];
+    } ioreq_server;
+#endif
 };
 
 static inline struct page_list_head *page_to_list(
-- 
2.7.4


