From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Paul Durrant <paul.durrant@citrix.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v4 06/12] x86/hvm/ioreq: rename .*pfn and .*gmfn to .*gfn
Date: Tue, 5 Sep 2017 12:37:10 +0100
Message-ID: <20170905113716.3960-7-paul.durrant@citrix.com>
In-Reply-To: <20170905113716.3960-1-paul.durrant@citrix.com>

Since ioreq servers are only relevant to HVM guests, and all of the names in
question unequivocally refer to guest frame numbers, rename them all to .*gfn
to avoid any confusion.

This patch is purely cosmetic. No semantic or functional change.
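
For context, a gfn (guest frame number) indexes the guest's pseudo-physical
address space, whereas an mfn names a machine (host) frame; "gmfn" was the
older term for a guest frame in translated-mode guests. Xen guards against
mixing the two with type-safe wrappers; the following minimal sketch
illustrates the pattern behind the _gfn()/gfn_x() helpers seen in this
patch (a simplified stand-in, not Xen's actual definitions):

    #include <stdint.h>

    /* Distinct single-member structs: assigning a gfn_t to an mfn_t, or
     * passing one where the other is expected, fails to compile. */
    typedef struct { uint64_t g; } gfn_t;
    typedef struct { uint64_t m; } mfn_t;

    static inline gfn_t _gfn(uint64_t g) { return (gfn_t){ g }; }
    static inline uint64_t gfn_x(gfn_t gfn) { return gfn.g; }

    /* Sentinel value, used below as gfn_x(INVALID_GFN). */
    #define INVALID_GFN _gfn(~(uint64_t)0)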

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 tools/libs/devicemodel/core.c                   | 10 ++--
 tools/libs/devicemodel/include/xendevicemodel.h | 12 ++--
 xen/arch/x86/hvm/dm.c                           |  4 +-
 xen/arch/x86/hvm/hvm.c                          |  6 +-
 xen/arch/x86/hvm/ioreq.c                        | 74 ++++++++++++-------------
 xen/include/asm-x86/hvm/domain.h                |  4 +-
 xen/include/asm-x86/hvm/ioreq.h                 |  4 +-
 xen/include/public/hvm/dm_op.h                  | 20 +++----
 8 files changed, 67 insertions(+), 67 deletions(-)

diff --git a/tools/libs/devicemodel/core.c b/tools/libs/devicemodel/core.c
index d7c6476006..fcb260d29b 100644
--- a/tools/libs/devicemodel/core.c
+++ b/tools/libs/devicemodel/core.c
@@ -174,7 +174,7 @@ int xendevicemodel_create_ioreq_server(
 
 int xendevicemodel_get_ioreq_server_info(
     xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
-    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
+    xen_pfn_t *ioreq_gfn, xen_pfn_t *bufioreq_gfn,
     evtchn_port_t *bufioreq_port)
 {
     struct xen_dm_op op;
@@ -192,11 +192,11 @@ int xendevicemodel_get_ioreq_server_info(
     if (rc)
         return rc;
 
-    if (ioreq_pfn)
-        *ioreq_pfn = data->ioreq_pfn;
+    if (ioreq_gfn)
+        *ioreq_gfn = data->ioreq_gfn;
 
-    if (bufioreq_pfn)
-        *bufioreq_pfn = data->bufioreq_pfn;
+    if (bufioreq_gfn)
+        *bufioreq_gfn = data->bufioreq_gfn;
 
     if (bufioreq_port)
         *bufioreq_port = data->bufioreq_port;
diff --git a/tools/libs/devicemodel/include/xendevicemodel.h b/tools/libs/devicemodel/include/xendevicemodel.h
index 580fad2f49..13216db04a 100644
--- a/tools/libs/devicemodel/include/xendevicemodel.h
+++ b/tools/libs/devicemodel/include/xendevicemodel.h
@@ -60,17 +60,17 @@ int xendevicemodel_create_ioreq_server(
  * @parm dmod a handle to an open devicemodel interface.
  * @parm domid the domain id to be serviced
  * @parm id the IOREQ Server id.
- * @parm ioreq_pfn pointer to a xen_pfn_t to receive the synchronous ioreq
- *                  gmfn
- * @parm bufioreq_pfn pointer to a xen_pfn_t to receive the buffered ioreq
- *                    gmfn
+ * @parm ioreq_gfn pointer to a xen_pfn_t to receive the synchronous ioreq
+ *                  gfn
+ * @parm bufioreq_gfn pointer to a xen_pfn_t to receive the buffered ioreq
+ *                    gfn
  * @parm bufioreq_port pointer to a evtchn_port_t to receive the buffered
  *                     ioreq event channel
  * @return 0 on success, -1 on failure.
  */
 int xendevicemodel_get_ioreq_server_info(
     xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
-    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
+    xen_pfn_t *ioreq_gfn, xen_pfn_t *bufioreq_gfn,
     evtchn_port_t *bufioreq_port);
 
 /**
@@ -168,7 +168,7 @@ int xendevicemodel_destroy_ioreq_server(
  * This function sets IOREQ Server state. An IOREQ Server
  * will not be passed emulation requests until it is in
  * the enabled state.
- * Note that the contents of the ioreq_pfn and bufioreq_pfn are
+ * Note that the contents of the ioreq_gfn and bufioreq_gfn are
  * not meaningful until the IOREQ Server is in the enabled state.
  *
  * @parm dmod a handle to an open devicemodel interface.
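
Returning to xendevicemodel_get_ioreq_server_info() above, a hypothetical
caller of the renamed interface looks like this (error handling is trimmed,
and the domain id and server id are assumed to exist already):

    #include <stdio.h>
    #include <xendevicemodel.h>

    static int show_ioreq_server_info(domid_t domid, ioservid_t id)
    {
        xendevicemodel_handle *dmod = xendevicemodel_open(NULL, 0);
        xen_pfn_t ioreq_gfn, bufioreq_gfn;
        evtchn_port_t bufioreq_port;
        int rc;

        if (!dmod)
            return -1;

        rc = xendevicemodel_get_ioreq_server_info(dmod, domid, id,
                                                  &ioreq_gfn, &bufioreq_gfn,
                                                  &bufioreq_port);
        if (rc == 0)
            printf("ioreq gfn %#llx, buffered ioreq gfn %#llx, port %u\n",
                   (unsigned long long)ioreq_gfn,
                   (unsigned long long)bufioreq_gfn, bufioreq_port);

        xendevicemodel_close(dmod);
        return rc;
    }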
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 4cf6deedc7..f7cb883fec 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -426,8 +426,8 @@ static int dm_op(const struct dmop_args *op_args)
             break;
 
         rc = hvm_get_ioreq_server_info(d, data->id,
-                                       &data->ioreq_pfn,
-                                       &data->bufioreq_pfn,
+                                       &data->ioreq_gfn,
+                                       &data->bufioreq_gfn,
                                        &data->bufioreq_port);
         break;
     }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6cb903def5..58b4afa1d1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4185,20 +4185,20 @@ static int hvmop_set_param(
             rc = -EINVAL;
         break;
     case HVM_PARAM_IOREQ_SERVER_PFN:
-        d->arch.hvm_domain.ioreq_gmfn.base = a.value;
+        d->arch.hvm_domain.ioreq_gfn.base = a.value;
         break;
     case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
     {
         unsigned int i;
 
         if ( a.value == 0 ||
-             a.value > sizeof(d->arch.hvm_domain.ioreq_gmfn.mask) * 8 )
+             a.value > sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8 )
         {
             rc = -EINVAL;
             break;
         }
         for ( i = 0; i < a.value; i++ )
-            set_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
+            set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
 
         break;
     }
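
The bits set here form the free map consumed by hvm_alloc_ioreq_gfn() and
hvm_free_ioreq_gfn() in the next file, so the window is capped at
8 * sizeof(unsigned long) (i.e. 64 on x86_64) pages above the configured
base. A self-contained sketch of the same claim-a-bit-from-a-window pattern
(plain, non-atomic stand-ins for Xen's bitops; illustrative only):

    #include <stdbool.h>

    #define WINDOW_BITS (sizeof(unsigned long) * 8)
    #define INVALID_GFN (~0UL)

    struct gfn_window {
        unsigned long base;   /* first gfn of the reserved window */
        unsigned long mask;   /* bit i set => gfn base + i is free */
    };

    static bool alloc_gfn(struct gfn_window *w, unsigned long *gfn)
    {
        unsigned int i;

        for ( i = 0; i < WINDOW_BITS; i++ )
        {
            if ( w->mask & (1UL << i) )
            {
                w->mask &= ~(1UL << i);  /* claim the slot */
                *gfn = w->base + i;
                return true;
            }
        }

        return false; /* window exhausted */
    }

    static void free_gfn(struct gfn_window *w, unsigned long gfn)
    {
        if ( gfn != INVALID_GFN )
            w->mask |= 1UL << (gfn - w->base);
    }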
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 752976d16d..69913cf3cd 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -181,17 +181,17 @@ bool_t handle_hvm_io_completion(struct vcpu *v)
     return 1;
 }
 
-static int hvm_alloc_ioreq_gmfn(struct domain *d, unsigned long *gmfn)
+static int hvm_alloc_ioreq_gfn(struct domain *d, unsigned long *gfn)
 {
     unsigned int i;
     int rc;
 
     rc = -ENOMEM;
-    for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gmfn.mask) * 8; i++ )
+    for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
     {
-        if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask) )
+        if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
         {
-            *gmfn = d->arch.hvm_domain.ioreq_gmfn.base + i;
+            *gfn = d->arch.hvm_domain.ioreq_gfn.base + i;
             rc = 0;
             break;
         }
@@ -200,12 +200,12 @@ static int hvm_alloc_ioreq_gmfn(struct domain *d, unsigned long *gmfn)
     return rc;
 }
 
-static void hvm_free_ioreq_gmfn(struct domain *d, unsigned long gmfn)
+static void hvm_free_ioreq_gfn(struct domain *d, unsigned long gfn)
 {
-    unsigned int i = gmfn - d->arch.hvm_domain.ioreq_gmfn.base;
+    unsigned int i = gfn - d->arch.hvm_domain.ioreq_gfn.base;
 
-    if ( gmfn != gfn_x(INVALID_GFN) )
-        set_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
+    if ( gfn != gfn_x(INVALID_GFN) )
+        set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
 }
 
 static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
@@ -216,7 +216,7 @@ static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
 }
 
 static int hvm_map_ioreq_page(
-    struct hvm_ioreq_server *s, bool_t buf, unsigned long gmfn)
+    struct hvm_ioreq_server *s, bool_t buf, unsigned long gfn)
 {
     struct domain *d = s->domain;
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
@@ -224,7 +224,7 @@ static int hvm_map_ioreq_page(
     void *va;
     int rc;
 
-    if ( (rc = prepare_ring_for_helper(d, gmfn, &page, &va)) )
+    if ( (rc = prepare_ring_for_helper(d, gfn, &page, &va)) )
         return rc;
 
     if ( (iorp->va != NULL) || d->is_dying )
@@ -235,7 +235,7 @@ static int hvm_map_ioreq_page(
 
     iorp->va = va;
     iorp->page = page;
-    iorp->gmfn = gmfn;
+    iorp->gfn = gfn;
 
     return 0;
 }
@@ -264,23 +264,23 @@ bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page)
     return found;
 }
 
-static void hvm_remove_ioreq_gmfn(
+static void hvm_remove_ioreq_gfn(
     struct domain *d, struct hvm_ioreq_page *iorp)
 {
-    if ( guest_physmap_remove_page(d, _gfn(iorp->gmfn),
+    if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
                                    _mfn(page_to_mfn(iorp->page)), 0) )
         domain_crash(d);
     clear_page(iorp->va);
 }
 
-static int hvm_add_ioreq_gmfn(
+static int hvm_add_ioreq_gfn(
     struct domain *d, struct hvm_ioreq_page *iorp)
 {
     int rc;
 
     clear_page(iorp->va);
 
-    rc = guest_physmap_add_page(d, _gfn(iorp->gmfn),
+    rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
                                 _mfn(page_to_mfn(iorp->page)), 0);
     if ( rc == 0 )
         paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));
@@ -412,17 +412,17 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
 }
 
 static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
-                                      unsigned long ioreq_pfn,
-                                      unsigned long bufioreq_pfn)
+                                      unsigned long ioreq_gfn,
+                                      unsigned long bufioreq_gfn)
 {
     int rc;
 
-    rc = hvm_map_ioreq_page(s, 0, ioreq_pfn);
+    rc = hvm_map_ioreq_page(s, 0, ioreq_gfn);
     if ( rc )
         return rc;
 
-    if ( bufioreq_pfn != gfn_x(INVALID_GFN) )
-        rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
+    if ( bufioreq_gfn != gfn_x(INVALID_GFN) )
+        rc = hvm_map_ioreq_page(s, 1, bufioreq_gfn);
 
     if ( rc )
         hvm_unmap_ioreq_page(s, 0);
@@ -435,8 +435,8 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
                                         bool_t handle_bufioreq)
 {
     struct domain *d = s->domain;
-    unsigned long ioreq_pfn = gfn_x(INVALID_GFN);
-    unsigned long bufioreq_pfn = gfn_x(INVALID_GFN);
+    unsigned long ioreq_gfn = gfn_x(INVALID_GFN);
+    unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
     int rc;
 
     if ( is_default )
@@ -451,18 +451,18 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
                    d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN]);
     }
 
-    rc = hvm_alloc_ioreq_gmfn(d, &ioreq_pfn);
+    rc = hvm_alloc_ioreq_gfn(d, &ioreq_gfn);
 
     if ( !rc && handle_bufioreq )
-        rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
+        rc = hvm_alloc_ioreq_gfn(d, &bufioreq_gfn);
 
     if ( !rc )
-        rc = hvm_ioreq_server_map_pages(s, ioreq_pfn, bufioreq_pfn);
+        rc = hvm_ioreq_server_map_pages(s, ioreq_gfn, bufioreq_gfn);
 
     if ( rc )
     {
-        hvm_free_ioreq_gmfn(d, ioreq_pfn);
-        hvm_free_ioreq_gmfn(d, bufioreq_pfn);
+        hvm_free_ioreq_gfn(d, ioreq_gfn);
+        hvm_free_ioreq_gfn(d, bufioreq_gfn);
     }
 
     return rc;
@@ -482,9 +482,9 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
     if ( !is_default )
     {
         if ( handle_bufioreq )
-            hvm_free_ioreq_gmfn(d, s->bufioreq.gmfn);
+            hvm_free_ioreq_gfn(d, s->bufioreq.gfn);
 
-        hvm_free_ioreq_gmfn(d, s->ioreq.gmfn);
+        hvm_free_ioreq_gfn(d, s->ioreq.gfn);
     }
 }
 
@@ -556,10 +556,10 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
 
     if ( !is_default )
     {
-        hvm_remove_ioreq_gmfn(d, &s->ioreq);
+        hvm_remove_ioreq_gfn(d, &s->ioreq);
 
         if ( handle_bufioreq )
-            hvm_remove_ioreq_gmfn(d, &s->bufioreq);
+            hvm_remove_ioreq_gfn(d, &s->bufioreq);
     }
 
     s->enabled = 1;
@@ -587,9 +587,9 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
     if ( !is_default )
     {
         if ( handle_bufioreq )
-            hvm_add_ioreq_gmfn(d, &s->bufioreq);
+            hvm_add_ioreq_gfn(d, &s->bufioreq);
 
-        hvm_add_ioreq_gmfn(d, &s->ioreq);
+        hvm_add_ioreq_gfn(d, &s->ioreq);
     }
 
     s->enabled = 0;
@@ -776,8 +776,8 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
 }
 
 int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
-                              unsigned long *ioreq_pfn,
-                              unsigned long *bufioreq_pfn,
+                              unsigned long *ioreq_gfn,
+                              unsigned long *bufioreq_gfn,
                               evtchn_port_t *bufioreq_port)
 {
     struct hvm_ioreq_server *s;
@@ -796,11 +796,11 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
         if ( s->id != id )
             continue;
 
-        *ioreq_pfn = s->ioreq.gmfn;
+        *ioreq_gfn = s->ioreq.gfn;
 
         if ( s->bufioreq.va != NULL )
         {
-            *bufioreq_pfn = s->bufioreq.gmfn;
+            *bufioreq_gfn = s->bufioreq.gfn;
             *bufioreq_port = s->bufioreq_evtchn;
         }
 
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index d2899c9bb2..ce536f75ef 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -36,7 +36,7 @@
 #include <public/hvm/dm_op.h>
 
 struct hvm_ioreq_page {
-    unsigned long gmfn;
+    unsigned long gfn;
     struct page_info *page;
     void *va;
 };
@@ -105,7 +105,7 @@ struct hvm_domain {
     struct {
         unsigned long base;
         unsigned long mask;
-    } ioreq_gmfn;
+    } ioreq_gfn;
 
     /* Lock protects all other values in the sub-struct and the default */
     struct {
diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
index b43667a367..43fbe115dc 100644
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -28,8 +28,8 @@ int hvm_create_ioreq_server(struct domain *d, domid_t domid,
                             ioservid_t *id);
 int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id);
 int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
-                              unsigned long *ioreq_pfn,
-                              unsigned long *bufioreq_pfn,
+                              unsigned long *ioreq_gfn,
+                              unsigned long *bufioreq_gfn,
                               evtchn_port_t *bufioreq_port);
 int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
                                      uint32_t type, uint64_t start,
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index 2a4c3d938d..6bbab5fca3 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -41,9 +41,9 @@
  * A domain supports a single 'legacy' IOREQ Server which is instantiated if
  * parameter...
  *
- * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
+ * HVM_PARAM_IOREQ_PFN is read (to get the gfn containing the synchronous
  * ioreq structures), or...
- * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
+ * HVM_PARAM_BUFIOREQ_PFN is read (to get the gfn containing the buffered
  * ioreq ring), or...
  * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
  * to request buffered I/O emulation).
@@ -81,14 +81,14 @@ struct xen_dm_op_create_ioreq_server {
  *
  * The emulator needs to map the synchronous ioreq structures and buffered
  * ioreq ring (if it exists) that Xen uses to request emulation. These are
- * hosted in the target domain's gmfns <ioreq_pfn> and <bufioreq_pfn>
+ * hosted in the target domain's gfns <ioreq_gfn> and <bufioreq_gfn>
  * respectively. In addition, if the IOREQ Server is handling buffered
  * emulation requests, the emulator needs to bind to event channel
  * <bufioreq_port> to listen for them. (The event channels used for
  * synchronous emulation requests are specified in the per-CPU ioreq
- * structures in <ioreq_pfn>).
+ * structures in <ioreq_gfn>).
  * If the IOREQ Server is not handling buffered emulation requests then the
- * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
+ * values handed back in <bufioreq_gfn> and <bufioreq_port> will both be 0.
  */
 #define XEN_DMOP_get_ioreq_server_info 2
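
Having retrieved the two gfns, an emulator would typically map them with
libxenforeignmemory. A brief sketch (xenforeignmemory_map() is the real
library entry point; the wrapper around it here is illustrative):

    #include <sys/mman.h>
    #include <xenforeignmemory.h>

    /* Map the synchronous ioreq page of domain 'domid' at gfn 'ioreq_gfn'
     * (as returned by XEN_DMOP_get_ioreq_server_info). */
    static void *map_ioreq_page(xenforeignmemory_handle *fmem,
                                uint32_t domid, xen_pfn_t ioreq_gfn)
    {
        return xenforeignmemory_map(fmem, domid, PROT_READ | PROT_WRITE,
                                    1 /* one page */, &ioreq_gfn, NULL);
    }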
 
@@ -98,10 +98,10 @@ struct xen_dm_op_get_ioreq_server_info {
     uint16_t pad;
     /* OUT - buffered ioreq port */
     evtchn_port_t bufioreq_port;
-    /* OUT - sync ioreq pfn */
-    uint64_aligned_t ioreq_pfn;
-    /* OUT - buffered ioreq pfn */
-    uint64_aligned_t bufioreq_pfn;
+    /* OUT - sync ioreq gfn */
+    uint64_aligned_t ioreq_gfn;
+    /* OUT - buffered ioreq gfn */
+    uint64_aligned_t bufioreq_gfn;
 };
 
 /*
@@ -150,7 +150,7 @@ struct xen_dm_op_ioreq_server_range {
  *
  * The IOREQ Server will not be passed any emulation requests until it is
  * in the enabled state.
- * Note that the contents of the ioreq_pfn and bufioreq_fn (see
+ * Note that the contents of the ioreq_gfn and bufioreq_gfn (see
  * XEN_DMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server
  * is in the enabled state.
  */
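
In practice an emulator therefore enables the server before relying on what
the mapped pages contain, e.g. (hypothetical helper, error handling elided):

    #include <xendevicemodel.h>

    /* The gfn contents are defined only once the server is enabled. */
    static int get_info_and_enable(xendevicemodel_handle *dmod,
                                   domid_t domid, ioservid_t id,
                                   xen_pfn_t *ioreq_gfn,
                                   xen_pfn_t *bufioreq_gfn,
                                   evtchn_port_t *bufioreq_port)
    {
        int rc = xendevicemodel_get_ioreq_server_info(dmod, domid, id,
                                                      ioreq_gfn, bufioreq_gfn,
                                                      bufioreq_port);
        if (rc)
            return rc;

        /* 1 => enabled; only now are the mapped pages meaningful. */
        return xendevicemodel_set_ioreq_server_state(dmod, domid, id, 1);
    }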
-- 
2.11.0

