* [PATCH v4 0/4] xen/pvh: enable migration on PVH Dom0
@ 2015-05-07 14:29 Roger Pau Monne
  2015-05-07 14:29 ` [PATCH v4 1/4] xen: introduce a helper to allocate non-contiguous memory Roger Pau Monne
                   ` (3 more replies)
  0 siblings, 4 replies; 15+ messages in thread
From: Roger Pau Monne @ 2015-05-07 14:29 UTC (permalink / raw)
  To: xen-devel

Changes in this iteration include the addition of a non-contiguous allocator 
that's used in shadow_track_dirty_vram and hap_track_dirty_vram in order to 
allocate the temporary dirty bitmap.

Thanks, Roger.


* [PATCH v4 1/4] xen: introduce a helper to allocate non-contiguous memory
  2015-05-07 14:29 [PATCH v4 0/4] xen/pvh: enable migration on PVH Dom0 Roger Pau Monne
@ 2015-05-07 14:29 ` Roger Pau Monne
  2015-05-07 15:22   ` Tim Deegan
  2015-05-07 15:29   ` Jan Beulich
  2015-05-07 14:29 ` [PATCH v4 2/4] xen/shadow: fix shadow_track_dirty_vram to work on hvm guests Roger Pau Monne
                   ` (2 subsequent siblings)
  3 siblings, 2 replies; 15+ messages in thread
From: Roger Pau Monne @ 2015-05-07 14:29 UTC (permalink / raw)
  To: xen-devel
  Cc: Andrew Cooper, Tim Deegan, Ian Campbell, Jan Beulich, Roger Pau Monne

The allocator uses independent calls to alloc_heap_pages in order to get the
desired amount of memory and then maps all the independent physical
addresses into a contiguous virtual address space.

In order to keep track of these regions, a red-black tree is used.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Tim Deegan <tim@xen.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
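For illustration, a minimal usage sketch of the new interface (not part of
the patch; dirty_size stands in for a caller-provided size in bytes):

    unsigned int pages = DIV_ROUND_UP(dirty_size, PAGE_SIZE);
    uint8_t *buf;

    /* Physically scattered pages, mapped at a contiguous virtual range. */
    buf = alloc_xenheap_noncontiguous(pages, 0);
    if ( buf == NULL )
        return -ENOMEM;

    /* ... use buf as an ordinary virtually-contiguous buffer ... */

    free_xenheap_noncontiguous(buf, pages);
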
 xen/common/page_alloc.c | 131 ++++++++++++++++++++++++++++++++++++++++++++++++
 xen/include/xen/mm.h    |   2 +
 2 files changed, 133 insertions(+)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 8500ed7..4ad5184 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -38,6 +38,7 @@
 #include <xen/event.h>
 #include <xen/tmem.h>
 #include <xen/tmem_xen.h>
+#include <xen/rbtree.h>
 #include <public/sysctl.h>
 #include <public/sched.h>
 #include <asm/page.h>
@@ -107,6 +108,13 @@ struct scrub_region {
 static struct scrub_region __initdata region[MAX_NUMNODES];
 static unsigned long __initdata chunk_size;
 
+static struct rb_root non_contiguous = { NULL, };
+struct va_page {
+    struct rb_node node;
+    void *va;
+    unsigned long mfn;
+};
+
 static void __init boot_bug(int line)
 {
     panic("Boot BUG at %s:%d", __FILE__, line);
@@ -1601,6 +1609,129 @@ void free_xenheap_pages(void *v, unsigned int order)
 
 #endif
 
+static struct va_page *va_xenheap_search(struct rb_root *root, void *va)
+{
+    struct rb_node *node = root->rb_node;
+
+    while ( node )
+    {
+        struct va_page *data = container_of(node, struct va_page, node);
+
+        if ( data->va == va )
+            return data;
+        if ( va < data->va )
+            node = node->rb_left;
+        else
+            node = node->rb_right;
+    }
+
+    return NULL;
+}
+
+static int va_xenheap_insert(struct rb_root *root, struct va_page *data)
+{
+    struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+    /* Figure out where to put new node */
+    while ( *new )
+    {
+        struct va_page *this = container_of(*new, struct va_page, node);
+
+        parent = *new;
+        if ( data->va < this->va )
+            new = &((*new)->rb_left);
+        else if ( data->va > this->va )
+            new = &((*new)->rb_right);
+        else
+            return -EEXIST;
+    }
+
+    /* Add new node and rebalance tree. */
+    rb_link_node(&data->node, parent, new);
+    rb_insert_color(&data->node, root);
+
+    return 0;
+}
+
+void *alloc_xenheap_noncontiguous(unsigned int pages, unsigned int memflags)
+{
+    unsigned long *mfn;
+    unsigned int i;
+    struct va_page *va_rb;
+    struct page_info *pg;
+    void *va = NULL;
+
+
+    mfn = xzalloc_array(unsigned long, pages);
+    if ( mfn == NULL )
+        return NULL;
+
+    for ( i = 0; i < pages; i++ )
+    {
+        pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN, 1, memflags, NULL);
+        if ( pg == NULL )
+            goto error;
+        mfn[i] = page_to_mfn(pg);
+    }
+
+    va = vmap(mfn, pages);
+    if ( va == NULL )
+        goto error;
+
+    for ( i = 0; i < pages; i++ )
+    {
+        va_rb = xmalloc_bytes(sizeof(*va_rb));
+        if ( va_rb == NULL )
+            goto error;
+        va_rb->va = va + i * PAGE_SIZE;
+        va_rb->mfn = mfn[i];
+        BUG_ON(va_xenheap_insert(&non_contiguous, va_rb));
+    }
+
+    xfree(mfn);
+    return va;
+
+ error:
+    if ( va != NULL )
+    {
+        for ( i = 0; i < pages; i++ )
+        {
+            va_rb = va_xenheap_search(&non_contiguous, va + i * PAGE_SIZE);
+            if ( va_rb != NULL )
+            {
+                rb_erase(&va_rb->node, &non_contiguous);
+                xfree(va_rb);
+            }
+        }
+        vunmap(va);
+    }
+    for ( i = 0; i < pages; i++ )
+        if ( mfn[i] != 0 )
+            free_heap_pages(mfn_to_page(mfn[i]), 1);
+    xfree(mfn);
+    return NULL;
+}
+
+void free_xenheap_noncontiguous(void *va, unsigned int pages)
+{
+    struct va_page *va_rb;
+    int i;
+
+    if ( va == NULL || pages == 0 )
+        return;
+
+    vunmap(va);
+
+    for ( i = 0; i < pages; i++ )
+    {
+        va_rb = va_xenheap_search(&non_contiguous, va + i * PAGE_SIZE);
+        BUG_ON(va_rb == NULL);
+        free_heap_pages(mfn_to_page(va_rb->mfn), 1);
+        rb_erase(&va_rb->node, &non_contiguous);
+        xfree(va_rb);
+    }
+}
+
 
 
 /*************************
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index a066363..b1eae58 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -48,6 +48,8 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
 void free_xenheap_pages(void *v, unsigned int order);
 #define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
 #define free_xenheap_page(v) (free_xenheap_pages(v,0))
+void *alloc_xenheap_noncontiguous(unsigned int pages, unsigned int memflags);
+void free_xenheap_noncontiguous(void *va, unsigned int pages);
 /* Map machine page range in Xen virtual address space. */
 int map_pages_to_xen(
     unsigned long virt,
-- 
1.9.5 (Apple Git-50.3)


* [PATCH v4 2/4] xen/shadow: fix shadow_track_dirty_vram to work on hvm guests
  2015-05-07 14:29 [PATCH v4 0/4] xen/pvh: enable migration on PVH Dom0 Roger Pau Monne
  2015-05-07 14:29 ` [PATCH v4 1/4] xen: introduce a helper to allocate non-contiguous memory Roger Pau Monne
@ 2015-05-07 14:29 ` Roger Pau Monne
  2015-05-07 15:27   ` Tim Deegan
  2015-05-07 14:29 ` [PATCH v4 3/4] xen/hap: make hap_track_dirty_vram use non-contiguous memory for temporary map Roger Pau Monne
  2015-05-07 14:29 ` [PATCH v4 4/4] xen: rework paging_log_dirty_op to work with hvm guests Roger Pau Monne
  3 siblings, 1 reply; 15+ messages in thread
From: Roger Pau Monne @ 2015-05-07 14:29 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Tim Deegan, Jan Beulich, Roger Pau Monne

Modify shadow_track_dirty_vram to use a local buffer and then flush to the
guest without the paging_lock held. This is modeled after
hap_track_dirty_vram.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Tim Deegan <tim@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
Changes since v3:
 - Use the newly introduced alloc_xenheap_noncontiguous in order to allocate
   the temporary buffer.

Changes since v2:
 - Remove checks against memcpy; it always returns the address passed as
   dst.
 - Join the nested ifs in the out path.
 - Add Andrew Cooper's Reviewed-by.
---
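In outline, the reworked flow only touches guest memory with the paging
lock dropped (a sketch; names abbreviated from the diff below):

    paging_lock(d);
    /* ... compute rc, track dirty pages into the Xen-local buffer ... */
    memcpy(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size);
    paging_unlock(d);

    /* copy_to_guest() may fault or need guest paging work on hvm
     * guests, so it must not run under the paging lock. */
    if ( rc == 0 &&
         copy_to_guest(guest_dirty_bitmap, dirty_bitmap, dirty_size) )
        rc = -EFAULT;
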
 xen/arch/x86/mm/shadow/common.c | 45 ++++++++++++++++++++++++-----------------
 1 file changed, 26 insertions(+), 19 deletions(-)

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 2e43d6d..c437147 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3516,7 +3516,7 @@ static void sh_clean_dirty_bitmap(struct domain *d)
 int shadow_track_dirty_vram(struct domain *d,
                             unsigned long begin_pfn,
                             unsigned long nr,
-                            XEN_GUEST_HANDLE_64(uint8) dirty_bitmap)
+                            XEN_GUEST_HANDLE_64(uint8) guest_dirty_bitmap)
 {
     int rc;
     unsigned long end_pfn = begin_pfn + nr;
@@ -3526,6 +3526,7 @@ int shadow_track_dirty_vram(struct domain *d,
     p2m_type_t t;
     struct sh_dirty_vram *dirty_vram;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    uint8_t *dirty_bitmap = NULL;
 
     if ( end_pfn < begin_pfn || end_pfn > p2m->max_mapped_pfn + 1 )
         return -EINVAL;
@@ -3554,6 +3555,13 @@ int shadow_track_dirty_vram(struct domain *d,
         goto out;
     }
 
+    dirty_bitmap = alloc_xenheap_noncontiguous(
+                             DIV_ROUND_UP(dirty_size, PAGE_SIZE), 0);
+    if ( dirty_bitmap == NULL )
+    {
+        rc = -ENOMEM;
+        goto out;
+    }
     /* This should happen seldomly (Video mode change),
      * no need to be careful. */
     if ( !dirty_vram )
@@ -3584,12 +3592,8 @@ int shadow_track_dirty_vram(struct domain *d,
         rc = -ENODATA;
     }
     else if (dirty_vram->last_dirty == -1)
-    {
         /* still completely clean, just copy our empty bitmap */
-        rc = -EFAULT;
-        if ( copy_to_guest(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size) == 0 )
-            rc = 0;
-    }
+        memcpy(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size);
     else
     {
         unsigned long map_mfn = INVALID_MFN;
@@ -3668,21 +3672,19 @@ int shadow_track_dirty_vram(struct domain *d,
         if ( map_sl1p )
             sh_unmap_domain_page(map_sl1p);
 
-        rc = -EFAULT;
-        if ( copy_to_guest(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size) == 0 ) {
-            memset(dirty_vram->dirty_bitmap, 0, dirty_size);
-            if (dirty_vram->last_dirty + SECONDS(2) < NOW())
+        memcpy(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size);
+        memset(dirty_vram->dirty_bitmap, 0, dirty_size);
+        if ( dirty_vram->last_dirty + SECONDS(2) < NOW() )
+        {
+            /* was clean for more than two seconds, try to disable guest
+             * write access */
+            for ( i = begin_pfn; i < end_pfn; i++ )
             {
-                /* was clean for more than two seconds, try to disable guest
-                 * write access */
-                for ( i = begin_pfn; i < end_pfn; i++ ) {
-                    mfn_t mfn = get_gfn_query_unlocked(d, i, &t);
-                    if (mfn_x(mfn) != INVALID_MFN)
-                        flush_tlb |= sh_remove_write_access(d, mfn, 1, 0);
-                }
-                dirty_vram->last_dirty = -1;
+                mfn_t mfn = get_gfn_query_unlocked(d, i, &t);
+                if ( mfn_x(mfn) != INVALID_MFN )
+                    flush_tlb |= sh_remove_write_access(d, mfn, 1, 0);
             }
-            rc = 0;
+            dirty_vram->last_dirty = -1;
         }
     }
     if ( flush_tlb )
@@ -3697,6 +3699,11 @@ out_dirty_vram:
 
 out:
     paging_unlock(d);
+    if ( rc == 0 && dirty_bitmap != NULL &&
+         copy_to_guest(guest_dirty_bitmap, dirty_bitmap, dirty_size) )
+        rc = -EFAULT;
+    free_xenheap_noncontiguous(dirty_bitmap,
+                               DIV_ROUND_UP(dirty_size, PAGE_SIZE));
     p2m_unlock(p2m_get_hostp2m(d));
     return rc;
 }
-- 
1.9.5 (Apple Git-50.3)


* [PATCH v4 3/4] xen/hap: make hap_track_dirty_vram use non-contiguous memory for temporary map
  2015-05-07 14:29 [PATCH v4 0/4] xen/pvh: enable migration on PVH Dom0 Roger Pau Monne
  2015-05-07 14:29 ` [PATCH v4 1/4] xen: introduce a helper to allocate non-contiguous memory Roger Pau Monne
  2015-05-07 14:29 ` [PATCH v4 2/4] xen/shadow: fix shadow_track_dirty_vram to work on hvm guests Roger Pau Monne
@ 2015-05-07 14:29 ` Roger Pau Monne
  2015-05-07 15:29   ` Tim Deegan
  2015-05-07 14:29 ` [PATCH v4 4/4] xen: rework paging_log_dirty_op to work with hvm guests Roger Pau Monne
  3 siblings, 1 reply; 15+ messages in thread
From: Roger Pau Monne @ 2015-05-07 14:29 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Tim Deegan, Jan Beulich, Roger Pau Monne

Just like it's done for shadow_track_dirty_vram, allocate the temporary
buffer using non-contiguous memory.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Tim Deegan <tim@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/mm/hap/hap.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index c28a6e3..61123b0 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -74,11 +74,10 @@ int hap_track_dirty_vram(struct domain *d,
     long rc = 0;
     struct sh_dirty_vram *dirty_vram;
     uint8_t *dirty_bitmap = NULL;
+    int size = DIV_ROUND_UP(nr, BITS_PER_BYTE);
 
     if ( nr )
     {
-        int size = (nr + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
-
         if ( !paging_mode_log_dirty(d) )
         {
             rc = paging_log_dirty_enable(d, 0);
@@ -87,7 +86,8 @@ int hap_track_dirty_vram(struct domain *d,
         }
 
         rc = -ENOMEM;
-        dirty_bitmap = xzalloc_bytes(size);
+        dirty_bitmap = alloc_xenheap_noncontiguous(
+                             DIV_ROUND_UP(size, PAGE_SIZE), 0);
         if ( !dirty_bitmap )
             goto out;
 
@@ -174,7 +174,7 @@ int hap_track_dirty_vram(struct domain *d,
                                   p2m_ram_logdirty, p2m_ram_rw);
     }
 out:
-    xfree(dirty_bitmap);
+    free_xenheap_noncontiguous(dirty_bitmap, DIV_ROUND_UP(size, PAGE_SIZE));
 
     return rc;
 }
-- 
1.9.5 (Apple Git-50.3)


* [PATCH v4 4/4] xen: rework paging_log_dirty_op to work with hvm guests
  2015-05-07 14:29 [PATCH v4 0/4] xen/pvh: enable migration on PVH Dom0 Roger Pau Monne
                   ` (2 preceding siblings ...)
  2015-05-07 14:29 ` [PATCH v4 3/4] xen/hap: make hap_track_dirty_vram use non-contiguous memory for temporary map Roger Pau Monne
@ 2015-05-07 14:29 ` Roger Pau Monne
  2015-05-07 15:48   ` Jan Beulich
  3 siblings, 1 reply; 15+ messages in thread
From: Roger Pau Monne @ 2015-05-07 14:29 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Tim Deegan, Jan Beulich, Roger Pau Monne

When the caller of paging_log_dirty_op is an HVM guest, Xen would choke when
trying to copy the dirty bitmap to the guest because the paging lock is
already held.

Fix this by independently mapping each page of the guest bitmap as needed
without the paging lock held.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Tim Deegan <tim@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
Changes since v3:
 - Drop last parameter from map_dirty_bitmap.
 - Drop pointless initializers in paging_log_dirty_op.
 - Add a new field to paging_domain in order to store the i2 position.
 - Move the again label up to make sure we don't reuse stale values of the
   log_dirty contents.
 - Replace the BUG_ON in paging_log_dirty_op with an ASSERT.

Changes since v2:
 - Add checks for p2m_is_ram and p2m_is_discard_write when mapping a guest
   page.
 - Remove error checking from memset/memcpy; they unconditionally return
   dst.
---
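The remap check added below rests on simple bitmap arithmetic, spelled out
here for reference (mirrors the diff, adds nothing to it):

    /* One bit per guest frame: frame `pages` is tracked at byte
     * (pages >> 3) of the bitmap, which lives in guest bitmap page
     * (pages >> (3 + PAGE_SHIFT)).  A new guest page has to be mapped
     * whenever the copy crosses into a bitmap page other than the
     * currently mapped one (index_mapped). */
    bool_t need_remap = (pages >> (3 + PAGE_SHIFT)) !=
                        (index_mapped >> (3 + PAGE_SHIFT));
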
 xen/arch/x86/mm/paging.c     | 102 ++++++++++++++++++++++++++++++++++++++-----
 xen/include/asm-x86/domain.h |   1 +
 2 files changed, 92 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 59d4720..36a0cde 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -408,6 +408,51 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
     return rv;
 }
 
+static inline void *map_dirty_bitmap(XEN_GUEST_HANDLE_64(uint8) dirty_bitmap,
+                                     unsigned long pages,
+                                     struct page_info **page)
+{
+    uint32_t pfec = PFEC_page_present | PFEC_write_access;
+    unsigned long gfn;
+    p2m_type_t p2mt;
+
+    gfn = paging_gva_to_gfn(current,
+                            (paddr_t)(dirty_bitmap.p + (pages >> 3)),
+                            &pfec);
+    if ( gfn == INVALID_GFN )
+        return NULL;
+
+    *page = get_page_from_gfn(current->domain, gfn, &p2mt, P2M_UNSHARE);
+
+    if ( !p2m_is_ram(p2mt) )
+    {
+        put_page(*page);
+        return NULL;
+    }
+    if ( p2m_is_paging(p2mt) )
+    {
+        put_page(*page);
+        p2m_mem_paging_populate(current->domain, gfn);
+        return NULL;
+    }
+    if ( p2m_is_shared(p2mt) || p2m_is_discard_write(p2mt) )
+    {
+        put_page(*page);
+        return NULL;
+    }
+
+    return __map_domain_page(*page);
+}
+
+static inline void unmap_dirty_bitmap(void *addr, struct page_info *page)
+{
+    if ( addr != NULL )
+    {
+        unmap_domain_page(addr);
+        put_page(page);
+    }
+}
+
 
 /* Read a domain's log-dirty bitmap and stats.  If the operation is a CLEAN,
  * clear the bitmap and stats as well. */
@@ -420,6 +465,9 @@ static int paging_log_dirty_op(struct domain *d,
     mfn_t *l4 = NULL, *l3 = NULL, *l2 = NULL;
     unsigned long *l1 = NULL;
     int i4, i3, i2;
+    uint8_t *dirty_bitmap;
+    struct page_info *page;
+    unsigned long index_mapped;
 
     if ( !resuming )
     {
@@ -433,6 +481,14 @@ static int paging_log_dirty_op(struct domain *d,
         p2m_flush_hardware_cached_dirty(d);
     }
 
+    index_mapped = resuming ? d->arch.paging.preempt.log_dirty.done : 0;
+    dirty_bitmap = map_dirty_bitmap(sc->dirty_bitmap, index_mapped, &page);
+    if ( dirty_bitmap == NULL )
+    {
+        domain_unpause(d);
+        return -EFAULT;
+    }
+
     paging_lock(d);
 
     if ( !d->arch.paging.preempt.dom )
@@ -455,6 +511,7 @@ static int paging_log_dirty_op(struct domain *d,
                  d->arch.paging.log_dirty.fault_count,
                  d->arch.paging.log_dirty.dirty_count);
 
+again:
     sc->stats.fault_count = d->arch.paging.log_dirty.fault_count;
     sc->stats.dirty_count = d->arch.paging.log_dirty.dirty_count;
 
@@ -472,18 +529,18 @@ static int paging_log_dirty_op(struct domain *d,
     l4 = paging_map_log_dirty_bitmap(d);
     i4 = d->arch.paging.preempt.log_dirty.i4;
     i3 = d->arch.paging.preempt.log_dirty.i3;
+    i2 = d->arch.paging.preempt.log_dirty.i2;
     pages = d->arch.paging.preempt.log_dirty.done;
 
     for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
     {
         l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
-        for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
+        for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
+             i3++, i2 = 0 )
         {
             l2 = ((l3 && mfn_valid(l3[i3])) ?
                   map_domain_page(mfn_x(l3[i3])) : NULL);
-            for ( i2 = 0;
-                  (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
-                  i2++ )
+            for ( ; (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES); i2++ )
             {
                 unsigned int bytes = PAGE_SIZE;
                 l1 = ((l2 && mfn_valid(l2[i2])) ?
@@ -492,15 +549,34 @@ static int paging_log_dirty_op(struct domain *d,
                     bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
                 if ( likely(peek) )
                 {
-                    if ( (l1 ? copy_to_guest_offset(sc->dirty_bitmap,
-                                                    pages >> 3, (uint8_t *)l1,
-                                                    bytes)
-                             : clear_guest_offset(sc->dirty_bitmap,
-                                                  pages >> 3, bytes)) != 0 )
+                    if ( pages >> (3 + PAGE_SHIFT) !=
+                         index_mapped >> (3 + PAGE_SHIFT) )
                     {
-                        rv = -EFAULT;
-                        goto out;
+                        /* We need to map next page */
+                        paging_unlock(d);
+                        unmap_dirty_bitmap(dirty_bitmap, page);
+                        index_mapped = pages;
+                        dirty_bitmap = map_dirty_bitmap(sc->dirty_bitmap, pages,
+                                                        &page);
+                        paging_lock(d);
+                        if ( dirty_bitmap == NULL )
+                        {
+                            rv = -EFAULT;
+                            goto out;
+                        }
+                        d->arch.paging.preempt.log_dirty.i4 = i4;
+                        d->arch.paging.preempt.log_dirty.i3 = i3;
+                        d->arch.paging.preempt.log_dirty.i2 = i2;
+                        d->arch.paging.preempt.log_dirty.done = pages;
+                        goto again;
                     }
+                    ASSERT(((pages >> 3) % PAGE_SIZE) + bytes <= PAGE_SIZE);
+                    if ( l1 )
+                        memcpy(dirty_bitmap + ((pages >> 3) % PAGE_SIZE), l1,
+                               bytes);
+                    else
+                        memset(dirty_bitmap + ((pages >> 3) % PAGE_SIZE), 0,
+                               bytes);
                 }
                 pages += bytes << 3;
                 if ( l1 )
@@ -517,6 +593,7 @@ static int paging_log_dirty_op(struct domain *d,
             {
                 d->arch.paging.preempt.log_dirty.i4 = i4;
                 d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
+                d->arch.paging.preempt.log_dirty.i2 = 0;
                 rv = -ERESTART;
                 break;
             }
@@ -529,6 +606,7 @@ static int paging_log_dirty_op(struct domain *d,
         {
             d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
             d->arch.paging.preempt.log_dirty.i3 = 0;
+            d->arch.paging.preempt.log_dirty.i2 = 0;
             rv = -ERESTART;
         }
         if ( rv )
@@ -570,12 +648,14 @@ static int paging_log_dirty_op(struct domain *d,
          * paging modes (shadow or hap).  Safe because the domain is paused. */
         d->arch.paging.log_dirty.clean_dirty_bitmap(d);
     }
+    unmap_dirty_bitmap(dirty_bitmap, page);
     domain_unpause(d);
     return rv;
 
  out:
     d->arch.paging.preempt.dom = NULL;
     paging_unlock(d);
+    unmap_dirty_bitmap(dirty_bitmap, page);
     domain_unpause(d);
 
     if ( l1 )
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 3f83e8b..e364a2c 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -202,6 +202,7 @@ struct paging_domain {
                 unsigned long done:PADDR_BITS - PAGE_SHIFT;
                 unsigned long i4:PAGETABLE_ORDER;
                 unsigned long i3:PAGETABLE_ORDER;
+                unsigned long i2:PAGETABLE_ORDER;
             } log_dirty;
         };
     } preempt;
-- 
1.9.5 (Apple Git-50.3)


* Re: [PATCH v4 1/4] xen: introduce a helper to allocate non-contiguous memory
  2015-05-07 14:29 ` [PATCH v4 1/4] xen: introduce a helper to allocate non-contiguous memory Roger Pau Monne
@ 2015-05-07 15:22   ` Tim Deegan
  2015-05-07 15:32     ` Tim Deegan
  2015-05-07 15:29   ` Jan Beulich
  1 sibling, 1 reply; 15+ messages in thread
From: Tim Deegan @ 2015-05-07 15:22 UTC (permalink / raw)
  To: Roger Pau Monne; +Cc: xen-devel, Ian Campbell, Jan Beulich, Andrew Cooper

At 16:29 +0200 on 07 May (1431016173), Roger Pau Monne wrote:
> The allocator uses independent calls to alloc_heap_pages in order to get the
> desired amount of memory and then maps all the independent physical
> addresses into a contiguous virtual address space.
> 
> In order to keep track of these regions, a red-black tree is used.

On x86, at least, the vmap apparatus should be able to tell you what
MFN is mapped by using the linear map to look at the l1e.  On ARM, it
looks like we could walk the xen pagetable trie from xen_second[] for
the same effect.

So I'd suggest adding a vmap_to_mfn() and using that instead of
building the red-black tree.
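
Roughly (a sketch, assuming a virt_to_xen_l1e()-style linear-map lookup is
reachable from the vmap code; untested):

    static unsigned long vmap_to_mfn(const void *va)
    {
        const l1_pgentry_t *pl1e = virt_to_xen_l1e((unsigned long)va);

        ASSERT(pl1e && (l1e_get_flags(*pl1e) & _PAGE_PRESENT));
        return l1e_get_pfn(*pl1e);
    }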

Cheers,

Tim.


* Re: [PATCH v4 2/4] xen/shadow: fix shadow_track_dirty_vram to work on hvm guests
  2015-05-07 14:29 ` [PATCH v4 2/4] xen/shadow: fix shadow_track_dirty_vram to work on hvm guests Roger Pau Monne
@ 2015-05-07 15:27   ` Tim Deegan
  0 siblings, 0 replies; 15+ messages in thread
From: Tim Deegan @ 2015-05-07 15:27 UTC (permalink / raw)
  To: Roger Pau Monne; +Cc: xen-devel, Jan Beulich, Andrew Cooper

At 16:29 +0200 on 07 May (1431016174), Roger Pau Monne wrote:
> Modify shadow_track_dirty_vram to use a local buffer and then flush to the
> guest without the paging_lock held. This is modeled after
> hap_track_dirty_vram.
> 
> Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>

Reviewed-by: Tim Deegan <tim@xen.org>


* Re: [PATCH v4 3/4] xen/hap: make hap_track_dirty_vram use non-contiguous memory for temporary map
  2015-05-07 14:29 ` [PATCH v4 3/4] xen/hap: make hap_track_dirty_vram use non-contiguous memory for temporary map Roger Pau Monne
@ 2015-05-07 15:29   ` Tim Deegan
  0 siblings, 0 replies; 15+ messages in thread
From: Tim Deegan @ 2015-05-07 15:29 UTC (permalink / raw)
  To: Roger Pau Monne; +Cc: xen-devel, Jan Beulich, Andrew Cooper

At 16:29 +0200 on 07 May (1431016175), Roger Pau Monne wrote:
> Just like it's done for shadow_track_dirty_vram, allocate the temporary
> buffer using non-contiguous memory.

Reviewed-by: Tim Deegan <tim@xen.org>


* Re: [PATCH v4 1/4] xen: introduce a helper to allocate non-contiguous memory
  2015-05-07 14:29 ` [PATCH v4 1/4] xen: introduce a helper to allocate non-contiguous memory Roger Pau Monne
  2015-05-07 15:22   ` Tim Deegan
@ 2015-05-07 15:29   ` Jan Beulich
  1 sibling, 0 replies; 15+ messages in thread
From: Jan Beulich @ 2015-05-07 15:29 UTC (permalink / raw)
  To: Roger Pau Monne; +Cc: Andrew Cooper, Tim Deegan, Ian Campbell, xen-devel

>>> On 07.05.15 at 16:29, <roger.pau@citrix.com> wrote:
> The allocator uses independent calls to alloc_heap_pages in order to get the
> desired amount of memory and then maps all the independent physical
> addresses into a contiguous virtual address space.
> 
> In order to keep track of these regions, a red-black tree is used.

I don't think this is needed (and makes the code needlessly large) -
reading the MFNs out of the page table entries during vfree()
should be quite fine.

Having named vfree() you may already guess that I'd also prefer
these to be called vmalloc()/vfree(), all the more since I can't see why
they're being restricted to the Xen heap (rather than using
anonymous domheap allocations).
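
The free side could then be as simple as (a sketch; assumes the
vmap_to_mfn() helper suggested elsewhere in this thread, and order-0
allocations):

    void vfree(void *va, unsigned int pages)
    {
        unsigned int i;

        /* Read each MFN back out of the page tables, free the page,
         * then tear down the virtual mapping. */
        for ( i = 0; i < pages; i++ )
            free_heap_pages(mfn_to_page(vmap_to_mfn(va + i * PAGE_SIZE)), 0);
        vunmap(va);
    }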

Jan


* Re: [PATCH v4 1/4] xen: introduce a helper to allocate non-contiguous memory
  2015-05-07 15:22   ` Tim Deegan
@ 2015-05-07 15:32     ` Tim Deegan
  2015-05-07 15:52       ` Jan Beulich
  0 siblings, 1 reply; 15+ messages in thread
From: Tim Deegan @ 2015-05-07 15:32 UTC (permalink / raw)
  To: Roger Pau Monne; +Cc: xen-devel, Ian Campbell, Jan Beulich, Andrew Cooper

At 16:22 +0100 on 07 May (1431015746), Tim Deegan wrote:
> At 16:29 +0200 on 07 May (1431016173), Roger Pau Monne wrote:
> > The allocator uses independent calls to alloc_heap_pages in order to get the
> > desired amount of memory and then maps all the independent physical
> > addresses into a contiguous virtual address space.
> > 
>> > In order to keep track of these regions, a red-black tree is used.
> 
> On x86, at least, the vmap apparatus should be able to tell you what
> MFN is mapped by using the linear map to look at the l1e.  On ARM, it
> looks like we could walk the xen pagetable trie from xen_second[] for
> the same effect.
> 
>> So I'd suggest adding a vmap_to_mfn() and using that instead of
> building the red-black tree. 

Also: does this need a clear_page() somewhere to scrub the allocation?
If not here, I think it's needed in patches 2 and 3 where you're
replacing xzalloc (unless you're sure that the loop will write every
byte of the allocation, in which case xzalloc was overkill).

Tim.


* Re: [PATCH v4 4/4] xen: rework paging_log_dirty_op to work with hvm guests
  2015-05-07 14:29 ` [PATCH v4 4/4] xen: rework paging_log_dirty_op to work with hvm guests Roger Pau Monne
@ 2015-05-07 15:48   ` Jan Beulich
  2015-05-08  8:28     ` Roger Pau Monné
  0 siblings, 1 reply; 15+ messages in thread
From: Jan Beulich @ 2015-05-07 15:48 UTC (permalink / raw)
  To: Roger Pau Monne; +Cc: Andrew Cooper, Tim Deegan, xen-devel

>>> On 07.05.15 at 16:29, <roger.pau@citrix.com> wrote:
> --- a/xen/arch/x86/mm/paging.c
> +++ b/xen/arch/x86/mm/paging.c
> @@ -408,6 +408,51 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
>      return rv;
>  }
>  
> +static inline void *map_dirty_bitmap(XEN_GUEST_HANDLE_64(uint8) dirty_bitmap,
> +                                     unsigned long pages,
> +                                     struct page_info **page)
> +{
> +    uint32_t pfec = PFEC_page_present | PFEC_write_access;
> +    unsigned long gfn;
> +    p2m_type_t p2mt;
> +
> +    gfn = paging_gva_to_gfn(current,
> +                            (paddr_t)(dirty_bitmap.p + (pages >> 3)),

Why paddr_t?

> @@ -455,6 +511,7 @@ static int paging_log_dirty_op(struct domain *d,
>                   d->arch.paging.log_dirty.fault_count,
>                   d->arch.paging.log_dirty.dirty_count);
>  
> +again:

Labels indented by at least one space please.

> @@ -472,18 +529,18 @@ static int paging_log_dirty_op(struct domain *d,
>      l4 = paging_map_log_dirty_bitmap(d);
>      i4 = d->arch.paging.preempt.log_dirty.i4;
>      i3 = d->arch.paging.preempt.log_dirty.i3;
> +    i2 = d->arch.paging.preempt.log_dirty.i2;

I don't see why this needs to be stored in struct domain - upon being
preempted you never seem to leave this non-zero.

> @@ -492,15 +549,34 @@ static int paging_log_dirty_op(struct domain *d,
>                      bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
>                  if ( likely(peek) )
>                  {
> -                    if ( (l1 ? copy_to_guest_offset(sc->dirty_bitmap,
> -                                                    pages >> 3, (uint8_t *)l1,
> -                                                    bytes)
> -                             : clear_guest_offset(sc->dirty_bitmap,
> -                                                  pages >> 3, bytes)) != 0 )
> +                    if ( pages >> (3 + PAGE_SHIFT) !=
> +                         index_mapped >> (3 + PAGE_SHIFT) )
>                      {
> -                        rv = -EFAULT;
> -                        goto out;
> +                        /* We need to map next page */
> +                        paging_unlock(d);
> +                        unmap_dirty_bitmap(dirty_bitmap, page);
> +                        index_mapped = pages;
> +                        dirty_bitmap = map_dirty_bitmap(sc->dirty_bitmap, pages,
> +                                                        &page);
> +                        paging_lock(d);
> +                        if ( dirty_bitmap == NULL )
> +                        {
> +                            rv = -EFAULT;
> +                            goto out;
> +                        }
> +                        d->arch.paging.preempt.log_dirty.i4 = i4;
> +                        d->arch.paging.preempt.log_dirty.i3 = i3;
> +                        d->arch.paging.preempt.log_dirty.i2 = i2;
> +                        d->arch.paging.preempt.log_dirty.done = pages;
> +                        goto again;

I think you need to update the state before dropping the paging lock.
And that would then actually seem to make the new i2 field necessary.
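
I.e. roughly (a sketch of the ordering being asked for):

    /* Record resume state while still holding the paging lock ... */
    d->arch.paging.preempt.log_dirty.i4 = i4;
    d->arch.paging.preempt.log_dirty.i3 = i3;
    d->arch.paging.preempt.log_dirty.i2 = i2;
    d->arch.paging.preempt.log_dirty.done = pages;
    /* ... and only then drop the lock to remap the guest bitmap page. */
    paging_unlock(d);
    unmap_dirty_bitmap(dirty_bitmap, page);
    dirty_bitmap = map_dirty_bitmap(sc->dirty_bitmap, pages, &page);
    paging_lock(d);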

Jan


* Re: [PATCH v4 1/4] xen: introduce a helper to allocate non-contiguous memory
  2015-05-07 15:32     ` Tim Deegan
@ 2015-05-07 15:52       ` Jan Beulich
  0 siblings, 0 replies; 15+ messages in thread
From: Jan Beulich @ 2015-05-07 15:52 UTC (permalink / raw)
  To: Roger Pau Monne, Tim Deegan; +Cc: Andrew Cooper, Ian Campbell, xen-devel

>>> On 07.05.15 at 17:32, <tim@xen.org> wrote:
> At 16:22 +0100 on 07 May (1431015746), Tim Deegan wrote:
>> At 16:29 +0200 on 07 May (1431016173), Roger Pau Monne wrote:
>> > The allocator uses independent calls to alloc_heap_pages in order to get
>> > the
>> > desired amount of memory and then maps all the independent physical
>> > addresses into a contiguous virtual address space.
>> > 
>> > In order to keep track of these regions, a red-black tree is used.
>> 
>> On x86, at least, the vmap apparatus should be able to tell you what
>> MFN is mapped by using the linear map to look at the l1e.  On ARM, it
>> looks like we could walk the xen pagetable trie from xen_second[] for
>> the same effect.
>> 
>> So I'd suggest adding a vmap_to_mfn() and using that instead of
>> building the red-black tree. 
> 
> Also: does this need a clear_page() somewhere to scrub the allocation?
> If not here, I think it's needed in patches 2 and 3 where you're
> replacing xzalloc (unless you're sure that the loop will write every
> byte of the allocation, in which case xzalloc was overkill).

Just like we have xmalloc() and xzalloc() I think we should have
vmalloc() and vzalloc().
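
E.g. (a sketch; assumes a pages-based vmalloc() as discussed above):

    void *vzalloc(unsigned int pages)
    {
        void *va = vmalloc(pages);

        if ( va != NULL )
            memset(va, 0, pages * PAGE_SIZE);
        return va;
    }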

Jan


* Re: [PATCH v4 4/4] xen: rework paging_log_dirty_op to work with hvm guests
  2015-05-07 15:48   ` Jan Beulich
@ 2015-05-08  8:28     ` Roger Pau Monné
  2015-05-08  8:40       ` Jan Beulich
  0 siblings, 1 reply; 15+ messages in thread
From: Roger Pau Monné @ 2015-05-08  8:28 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Andrew Cooper, Tim Deegan, xen-devel

On 07/05/15 at 17:48, Jan Beulich wrote:
>>>> On 07.05.15 at 16:29, <roger.pau@citrix.com> wrote:
>> --- a/xen/arch/x86/mm/paging.c
>> +++ b/xen/arch/x86/mm/paging.c
>> @@ -408,6 +408,51 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
>>      return rv;
>>  }
>>  
>> +static inline void *map_dirty_bitmap(XEN_GUEST_HANDLE_64(uint8) dirty_bitmap,
>> +                                     unsigned long pages,
>> +                                     struct page_info **page)
>> +{
>> +    uint32_t pfec = PFEC_page_present | PFEC_write_access;
>> +    unsigned long gfn;
>> +    p2m_type_t p2mt;
>> +
>> +    gfn = paging_gva_to_gfn(current,
>> +                            (paddr_t)(dirty_bitmap.p + (pages >> 3)),
> 
> Why paddr_t?

Without it I get:

paging.c:421:29: error: passing argument 2 of 'paging_gva_to_gfn' makes
integer from pointer without a cast [-Werror]

>> @@ -455,6 +511,7 @@ static int paging_log_dirty_op(struct domain *d,
>>                   d->arch.paging.log_dirty.fault_count,
>>                   d->arch.paging.log_dirty.dirty_count);
>>  
>> +again:
> 
> Labels indented by at least one space please.
>
>> @@ -472,18 +529,18 @@ static int paging_log_dirty_op(struct domain *d,
>>      l4 = paging_map_log_dirty_bitmap(d);
>>      i4 = d->arch.paging.preempt.log_dirty.i4;
>>      i3 = d->arch.paging.preempt.log_dirty.i3;
>> +    i2 = d->arch.paging.preempt.log_dirty.i2;
> 
> I don't see why this needs to be stored in struct domain - upon being
> preempted you never seem to leave this non-zero.
> 
>> @@ -492,15 +549,34 @@ static int paging_log_dirty_op(struct domain *d,
>>                      bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
>>                  if ( likely(peek) )
>>                  {
>> -                    if ( (l1 ? copy_to_guest_offset(sc->dirty_bitmap,
>> -                                                    pages >> 3, (uint8_t *)l1,
>> -                                                    bytes)
>> -                             : clear_guest_offset(sc->dirty_bitmap,
>> -                                                  pages >> 3, bytes)) != 0 )
>> +                    if ( pages >> (3 + PAGE_SHIFT) !=
>> +                         index_mapped >> (3 + PAGE_SHIFT) )
>>                      {
>> -                        rv = -EFAULT;
>> -                        goto out;
>> +                        /* We need to map next page */
>> +                        paging_unlock(d);
>> +                        unmap_dirty_bitmap(dirty_bitmap, page);
>> +                        index_mapped = pages;
>> +                        dirty_bitmap = map_dirty_bitmap(sc->dirty_bitmap, pages,
>> +                                                        &page);
>> +                        paging_lock(d);
>> +                        if ( dirty_bitmap == NULL )
>> +                        {
>> +                            rv = -EFAULT;
>> +                            goto out;
>> +                        }
>> +                        d->arch.paging.preempt.log_dirty.i4 = i4;
>> +                        d->arch.paging.preempt.log_dirty.i3 = i3;
>> +                        d->arch.paging.preempt.log_dirty.i2 = i2;
>> +                        d->arch.paging.preempt.log_dirty.done = pages;
>> +                        goto again;
> 
> I think you need to update the state before dropping the paging lock.
> And that would then actually seem to make the new i2 field necessary.

I've moved it so the fields are set before dropping the paging lock.

Roger.


* Re: [PATCH v4 4/4] xen: rework paging_log_dirty_op to work with hvm guests
  2015-05-08  8:28     ` Roger Pau Monné
@ 2015-05-08  8:40       ` Jan Beulich
  2015-05-08  8:51         ` Roger Pau Monné
  0 siblings, 1 reply; 15+ messages in thread
From: Jan Beulich @ 2015-05-08  8:40 UTC (permalink / raw)
  To: Roger Pau Monné; +Cc: Andrew Cooper, Tim Deegan, xen-devel

>>> On 08.05.15 at 10:28, <roger.pau@citrix.com> wrote:
> On 07/05/15 at 17:48, Jan Beulich wrote:
>>>>> On 07.05.15 at 16:29, <roger.pau@citrix.com> wrote:
>>> --- a/xen/arch/x86/mm/paging.c
>>> +++ b/xen/arch/x86/mm/paging.c
>>> @@ -408,6 +408,51 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
>>>      return rv;
>>>  }
>>>  
>>> +static inline void *map_dirty_bitmap(XEN_GUEST_HANDLE_64(uint8) dirty_bitmap,
>>> +                                     unsigned long pages,
>>> +                                     struct page_info **page)
>>> +{
>>> +    uint32_t pfec = PFEC_page_present | PFEC_write_access;
>>> +    unsigned long gfn;
>>> +    p2m_type_t p2mt;
>>> +
>>> +    gfn = paging_gva_to_gfn(current,
>>> +                            (paddr_t)(dirty_bitmap.p + (pages >> 3)),
>> 
>> Why paddr_t?
> 
> Without it I get:
> 
> paging.c:421:29: error: passing argument 2 of 'paging_gva_to_gfn' makes
> integer from pointer without a cast [-Werror]

I didn't object to the cast, but to the type used in it: This is a virtual
(guest) address, and the respective function parameter has type
"unsigned long".

Jan


* Re: [PATCH v4 4/4] xen: rework paging_log_dirty_op to work with hvm guests
  2015-05-08  8:40       ` Jan Beulich
@ 2015-05-08  8:51         ` Roger Pau Monné
  0 siblings, 0 replies; 15+ messages in thread
From: Roger Pau Monné @ 2015-05-08  8:51 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Andrew Cooper, Tim Deegan, xen-devel

On 08/05/15 at 10:40, Jan Beulich wrote:
>>>> On 08.05.15 at 10:28, <roger.pau@citrix.com> wrote:
>> On 07/05/15 at 17:48, Jan Beulich wrote:
>>>>>> On 07.05.15 at 16:29, <roger.pau@citrix.com> wrote:
>>>> --- a/xen/arch/x86/mm/paging.c
>>>> +++ b/xen/arch/x86/mm/paging.c
>>>> @@ -408,6 +408,51 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
>>>>      return rv;
>>>>  }
>>>>  
>>>> +static inline void *map_dirty_bitmap(XEN_GUEST_HANDLE_64(uint8) dirty_bitmap,
>>>> +                                     unsigned long pages,
>>>> +                                     struct page_info **page)
>>>> +{
>>>> +    uint32_t pfec = PFEC_page_present | PFEC_write_access;
>>>> +    unsigned long gfn;
>>>> +    p2m_type_t p2mt;
>>>> +
>>>> +    gfn = paging_gva_to_gfn(current,
>>>> +                            (paddr_t)(dirty_bitmap.p + (pages >> 3)),
>>>
>>> Why paddr_t?
>>
>> Without it I get:
>>
>> paging.c:421:29: error: passing argument 2 of 'paging_gva_to_gfn' makes
>> integer from pointer without a cast [-Werror]
> 
> I didn't object to the cast, but to the type used in it: This is a virtual
> (guest) address, and the respective function parameter has type
> "unsigned long".

Right, thanks. I have no idea why I've used paddr_t; it will be fixed in
the next iteration.

Roger.
