* [PATCH v7 0/4] xen/pvh: enable migration on PVH Dom0
@ 2015-05-14 15:06 Roger Pau Monne
From: Roger Pau Monne @ 2015-05-14 15:06 UTC
  To: xen-devel

Changes in this revision include using clear_page instead of memset in 
vzalloc and minor fixes to paging_log_dirty_op in order to make the page 
unmapping/mapping more similar to the preempt case.

Thanks, Roger.


* [PATCH v7 1/4] xen: introduce a helper to allocate non-contiguous memory
@ 2015-05-14 15:06 ` Roger Pau Monne
From: Roger Pau Monne @ 2015-05-14 15:06 UTC
  To: xen-devel
  Cc: Ian Campbell, Andrew Cooper, Tim Deegan, Stefano Stabellini,
	Jan Beulich, Roger Pau Monne

The allocator performs independent calls to alloc_domheap_page in order to
obtain the desired number of pages, and then maps the resulting (potentially
discontiguous) physical addresses into a contiguous virtual address space.
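
As an illustration, a minimal caller sketch (hypothetical code, not part of
this patch; use_buffer() is made up):

    /* A zeroed scratch buffer backed by order-0 domheap pages; it is
     * only virtually contiguous, so fragmentation cannot make the
     * allocation fail as long as single pages are available. */
    uint8_t *buf = vzalloc(1UL << 20);

    if ( buf == NULL )
        return -ENOMEM;
    use_buffer(buf);            /* hypothetical consumer */
    vfree(buf);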

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Tested-by: Julien Grall <julien.grall@citrix.com> (ARM)
Cc: Stefano Stabellini <stefano.stabellini@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Tim Deegan <tim@xen.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
Changes since v6:
 - Use clear_page instead of memset in vzalloc.

Changes since v5:
 - Call vunmap before freeing the pages in vfree.
 - Simplify the fail case by reusing i.

Changes since v4:
 - Remove the red-black tree, the same can be achieved by doing a VA -> MFN
   translation.
 - Provide a vzalloc that zeroes the allocated memory area.
 - Allocate anonymous domheap pages.
---
 xen/common/vmap.c          | 67 ++++++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/mm.h   |  2 ++
 xen/include/asm-x86/page.h |  2 ++
 xen/include/xen/vmap.h     |  3 +++
 4 files changed, 74 insertions(+)

diff --git a/xen/common/vmap.c b/xen/common/vmap.c
index 739d468..4519e0c 100644
--- a/xen/common/vmap.c
+++ b/xen/common/vmap.c
@@ -215,4 +215,71 @@ void vunmap(const void *va)
 #endif
     vm_free(va);
 }
+
+void *vmalloc(size_t size)
+{
+    unsigned long *mfn;
+    size_t pages, i;
+    struct page_info *pg;
+    void *va;
+
+    ASSERT( size != 0 );
+
+    pages = PFN_UP(size);
+    mfn = xmalloc_array(unsigned long, pages);
+    if ( mfn == NULL )
+        return NULL;
+
+    for ( i = 0; i < pages; i++ )
+    {
+        pg = alloc_domheap_page(NULL, 0);
+        if ( pg == NULL )
+            goto error;
+        mfn[i] = page_to_mfn(pg);
+    }
+
+    va = vmap(mfn, pages);
+    if ( va == NULL )
+        goto error;
+
+    xfree(mfn);
+    return va;
+
+ error:
+    while ( i-- )
+         free_domheap_page(mfn_to_page(mfn[i]));
+    xfree(mfn);
+    return NULL;
+}
+
+void *vzalloc(size_t size)
+{
+    void *p = vmalloc(size);
+    size_t i;
+
+    if ( p == NULL )
+        return NULL;
+
+    for ( i = 0; i < size; i += PAGE_SIZE )
+        clear_page(p + i);
+
+    return p;
+}
+
+void vfree(void *va)
+{
+    unsigned int i, pages = vm_size(va);
+    struct page_info *pg;
+    PAGE_LIST_HEAD(pg_list);
+
+    ASSERT( pages != 0 );
+
+    for ( i = 0; i < pages; i++ )
+        page_list_add(vmap_to_page(va + i * PAGE_SIZE), &pg_list);
+
+    vunmap(va);
+
+    while ( (pg = page_list_remove_head(&pg_list)) != NULL )
+        free_domheap_page(pg);
+}
 #endif
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index d25e485..c0afcec 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -208,6 +208,8 @@ static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
 #define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
 #define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
 #define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
+#define vmap_to_mfn(va)     paddr_to_pfn(virt_to_maddr((vaddr_t)va))
+#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
 
 /* Page-align address and convert to frame number format */
 #define paddr_to_pfn_aligned(paddr)    paddr_to_pfn(PAGE_ALIGN(paddr))
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index a8bc999..8977a76 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -262,6 +262,8 @@ void copy_page_sse2(void *, const void *);
 #define pfn_to_paddr(pfn)   __pfn_to_paddr(pfn)
 #define paddr_to_pfn(pa)    __paddr_to_pfn(pa)
 #define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
+#define vmap_to_mfn(va)     l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va)))
+#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
 
 #endif /* !defined(__ASSEMBLY__) */
 
diff --git a/xen/include/xen/vmap.h b/xen/include/xen/vmap.h
index b1923dd..a13591d 100644
--- a/xen/include/xen/vmap.h
+++ b/xen/include/xen/vmap.h
@@ -11,6 +11,9 @@ void *__vmap(const unsigned long *mfn, unsigned int granularity,
              unsigned int nr, unsigned int align, unsigned int flags);
 void *vmap(const unsigned long *mfn, unsigned int nr);
 void vunmap(const void *);
+void *vmalloc(size_t size);
+void *vzalloc(size_t size);
+void vfree(void *va);
 
 void __iomem *ioremap(paddr_t, size_t);
 
-- 
1.9.5 (Apple Git-50.3)



* [PATCH v7 2/4] xen/shadow: fix shadow_track_dirty_vram to work on hvm guests
@ 2015-05-14 15:06 ` Roger Pau Monne
From: Roger Pau Monne @ 2015-05-14 15:06 UTC
  To: xen-devel; +Cc: Andrew Cooper, Tim Deegan, Jan Beulich, Roger Pau Monne

Modify shadow_track_dirty_vram to collect the dirty bits in a local buffer
and to copy them to the guest only once the paging_lock has been dropped.
This is modeled after hap_track_dirty_vram.
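
In outline, the locking pattern introduced here looks as follows (a
simplified sketch of this patch; error paths and the restore-on-failure
logic shown in the diff are elided):

    /* Fill a Xen-local buffer under the paging lock, and only touch
     * guest memory (which may fault) after the lock is dropped. */
    dirty_bitmap = vzalloc(dirty_size);
    if ( dirty_bitmap == NULL )
        return -ENOMEM;

    paging_lock(d);
    memcpy(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size);
    paging_unlock(d);

    if ( copy_to_guest(guest_dirty_bitmap, dirty_bitmap, dirty_size) )
        rc = -EFAULT;   /* the full patch ORs the bits back on failure */
    vfree(dirty_bitmap);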

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Cc: Tim Deegan <tim@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
Changes since v5:
 - Restore dirty bits if copy to guest fails.

Changes since v4:
 - Use newly introduced vzalloc that zeroes memory.

Changes since v3:
 - Use the newly introduced alloc_xenheap_noncontiguous in order to allocate
   the temporary buffer.

Changes since v2:
 - Remove checks against memcpy, it always returns the address passed as
   dst.
 - Join the nested ifs in the out path.
 - Add Andrew Cooper Reviewed-by.
---
 xen/arch/x86/mm/shadow/common.c | 49 +++++++++++++++++++++++++----------------
 1 file changed, 30 insertions(+), 19 deletions(-)

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 2e43d6d..9e9d19f 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3516,7 +3516,7 @@ static void sh_clean_dirty_bitmap(struct domain *d)
 int shadow_track_dirty_vram(struct domain *d,
                             unsigned long begin_pfn,
                             unsigned long nr,
-                            XEN_GUEST_HANDLE_64(uint8) dirty_bitmap)
+                            XEN_GUEST_HANDLE_64(uint8) guest_dirty_bitmap)
 {
     int rc;
     unsigned long end_pfn = begin_pfn + nr;
@@ -3526,6 +3526,7 @@ int shadow_track_dirty_vram(struct domain *d,
     p2m_type_t t;
     struct sh_dirty_vram *dirty_vram;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    uint8_t *dirty_bitmap = NULL;
 
     if ( end_pfn < begin_pfn || end_pfn > p2m->max_mapped_pfn + 1 )
         return -EINVAL;
@@ -3554,6 +3555,12 @@ int shadow_track_dirty_vram(struct domain *d,
         goto out;
     }
 
+    dirty_bitmap = vzalloc(dirty_size);
+    if ( dirty_bitmap == NULL )
+    {
+        rc = -ENOMEM;
+        goto out;
+    }
     /* This should happen seldomly (Video mode change),
      * no need to be careful. */
     if ( !dirty_vram )
@@ -3584,12 +3591,8 @@ int shadow_track_dirty_vram(struct domain *d,
         rc = -ENODATA;
     }
     else if (dirty_vram->last_dirty == -1)
-    {
         /* still completely clean, just copy our empty bitmap */
-        rc = -EFAULT;
-        if ( copy_to_guest(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size) == 0 )
-            rc = 0;
-    }
+        memcpy(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size);
     else
     {
         unsigned long map_mfn = INVALID_MFN;
@@ -3668,21 +3671,19 @@ int shadow_track_dirty_vram(struct domain *d,
         if ( map_sl1p )
             sh_unmap_domain_page(map_sl1p);
 
-        rc = -EFAULT;
-        if ( copy_to_guest(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size) == 0 ) {
-            memset(dirty_vram->dirty_bitmap, 0, dirty_size);
-            if (dirty_vram->last_dirty + SECONDS(2) < NOW())
+        memcpy(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size);
+        memset(dirty_vram->dirty_bitmap, 0, dirty_size);
+        if ( dirty_vram->last_dirty + SECONDS(2) < NOW() )
+        {
+            /* was clean for more than two seconds, try to disable guest
+             * write access */
+            for ( i = begin_pfn; i < end_pfn; i++ )
             {
-                /* was clean for more than two seconds, try to disable guest
-                 * write access */
-                for ( i = begin_pfn; i < end_pfn; i++ ) {
-                    mfn_t mfn = get_gfn_query_unlocked(d, i, &t);
-                    if (mfn_x(mfn) != INVALID_MFN)
-                        flush_tlb |= sh_remove_write_access(d, mfn, 1, 0);
-                }
-                dirty_vram->last_dirty = -1;
+                mfn_t mfn = get_gfn_query_unlocked(d, i, &t);
+                if ( mfn_x(mfn) != INVALID_MFN )
+                    flush_tlb |= sh_remove_write_access(d, mfn, 1, 0);
             }
-            rc = 0;
+            dirty_vram->last_dirty = -1;
         }
     }
     if ( flush_tlb )
@@ -3697,6 +3698,16 @@ out_dirty_vram:
 
 out:
     paging_unlock(d);
+    if ( rc == 0 && dirty_bitmap != NULL &&
+         copy_to_guest(guest_dirty_bitmap, dirty_bitmap, dirty_size) )
+    {
+        paging_lock(d);
+        for ( i = 0; i < dirty_size; i++ )
+            dirty_vram->dirty_bitmap[i] |= dirty_bitmap[i];
+        paging_unlock(d);
+        rc = -EFAULT;
+    }
+    vfree(dirty_bitmap);
     p2m_unlock(p2m_get_hostp2m(d));
     return rc;
 }
-- 
1.9.5 (Apple Git-50.3)



* [PATCH v7 3/4] xen/hap: make hap_track_dirty_vram use non-contiguous memory for temporary map
@ 2015-05-14 15:06 ` Roger Pau Monne
From: Roger Pau Monne @ 2015-05-14 15:06 UTC
  To: xen-devel; +Cc: Andrew Cooper, Tim Deegan, Jan Beulich, Roger Pau Monne

Just like it's done for shadow_track_dirty_vram, allocate the temporary
buffer using non-contiguous memory.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Cc: Tim Deegan <tim@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
Changes since v4:
 - Use vzalloc in order to allocate zeroed memory.
---
 xen/arch/x86/mm/hap/hap.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index c28a6e3..cea7990 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -87,7 +87,7 @@ int hap_track_dirty_vram(struct domain *d,
         }
 
         rc = -ENOMEM;
-        dirty_bitmap = xzalloc_bytes(size);
+        dirty_bitmap = vzalloc(size);
         if ( !dirty_bitmap )
             goto out;
 
@@ -174,7 +174,7 @@ int hap_track_dirty_vram(struct domain *d,
                                   p2m_ram_logdirty, p2m_ram_rw);
     }
 out:
-    xfree(dirty_bitmap);
+    vfree(dirty_bitmap);
 
     return rc;
 }
-- 
1.9.5 (Apple Git-50.3)



* [PATCH v7 4/4] xen: rework paging_log_dirty_op to work with hvm guests
@ 2015-05-14 15:06 ` Roger Pau Monne
From: Roger Pau Monne @ 2015-05-14 15:06 UTC
  To: xen-devel; +Cc: Andrew Cooper, Tim Deegan, Jan Beulich, Roger Pau Monne

When the caller of paging_log_dirty_op is an HVM guest, Xen would choke
when trying to copy the dirty bitmap to the guest because the paging lock
is already held.

Fix this by independently mapping each page of the guest bitmap as needed
without the paging lock held.
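
In outline (a simplified sketch; the real patch also records the i2/i3/i4
resume position so the walk can restart cleanly): the guest-virtual address
of the bitmap is translated one page at a time, and the resulting page is
mapped into Xen so that the copy done under the paging lock becomes a plain
memcpy:

    /* Sketch: translate the bitmap pointer at the current offset, take
     * a reference on the backing page and map it - all done before the
     * paging lock is (re)acquired. */
    gfn = paging_gva_to_gfn(current,
                            (unsigned long)(dirty_bitmap.p + (pages >> 3)),
                            &pfec);
    page = get_page_from_gfn(current->domain, gfn, &p2mt, P2M_UNSHARE);
    vaddr = __map_domain_page(page);
    /* ... copy dirty bits into the mapped page under the lock ... */
    unmap_domain_page(vaddr);
    put_page(page);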

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Tim Deegan <tim@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
Changes since v6:
 - Move the again label to the start of the function.
 - Set all the preempt fields if we need to map a new page (just like on the
   preempt case).

Changes since v4:
 - Indent again label.
 - Replace bogus paddr_t cast with proper type.
 - Update preempt.log_dirty before dropping the paging lock.

Changes since v3:
 - Drop last parameter from map_dirty_bitmap.
 - Drop pointless initializers in paging_log_dirty_op.
 - Add a new field to paging_domain in order to copy i2 position.
 - Move the again case up to make sure we don't hold cached values of the
   contents of log_dirty.
 - Replace the BUG_ON in paging_log_dirty_op with an ASSERT.

Changes since v2:
 - Add checks for p2m_is_ram and p2m_is_discard_write when mapping a guest
   page.
 - Remove error checking from memset/memcpy, they unconditionally return
   dst.
---
 xen/arch/x86/mm/paging.c     | 97 +++++++++++++++++++++++++++++++++++++++-----
 xen/include/asm-x86/domain.h |  1 +
 2 files changed, 87 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 59d4720..5eee88c 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -408,6 +408,51 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
     return rv;
 }
 
+static inline void *map_dirty_bitmap(XEN_GUEST_HANDLE_64(uint8) dirty_bitmap,
+                                     unsigned long pages,
+                                     struct page_info **page)
+{
+    uint32_t pfec = PFEC_page_present | PFEC_write_access;
+    unsigned long gfn;
+    p2m_type_t p2mt;
+
+    gfn = paging_gva_to_gfn(current,
+                            (unsigned long)(dirty_bitmap.p + (pages >> 3)),
+                            &pfec);
+    if ( gfn == INVALID_GFN )
+        return NULL;
+
+    *page = get_page_from_gfn(current->domain, gfn, &p2mt, P2M_UNSHARE);
+
+    if ( !p2m_is_ram(p2mt) )
+    {
+        put_page(*page);
+        return NULL;
+    }
+    if ( p2m_is_paging(p2mt) )
+    {
+        put_page(*page);
+        p2m_mem_paging_populate(current->domain, gfn);
+        return NULL;
+    }
+    if ( p2m_is_shared(p2mt) || p2m_is_discard_write(p2mt) )
+    {
+        put_page(*page);
+        return NULL;
+    }
+
+    return __map_domain_page(*page);
+}
+
+static inline void unmap_dirty_bitmap(void *addr, struct page_info *page)
+{
+    if ( addr != NULL )
+    {
+        unmap_domain_page(addr);
+        put_page(page);
+    }
+}
+
 
 /* Read a domain's log-dirty bitmap and stats.  If the operation is a CLEAN,
  * clear the bitmap and stats as well. */
@@ -420,7 +465,11 @@ static int paging_log_dirty_op(struct domain *d,
     mfn_t *l4 = NULL, *l3 = NULL, *l2 = NULL;
     unsigned long *l1 = NULL;
     int i4, i3, i2;
+    uint8_t *dirty_bitmap;
+    struct page_info *page;
+    unsigned long index_mapped;
 
+ again:
     if ( !resuming )
     {
         domain_pause(d);
@@ -433,6 +482,14 @@ static int paging_log_dirty_op(struct domain *d,
         p2m_flush_hardware_cached_dirty(d);
     }
 
+    index_mapped = resuming ? d->arch.paging.preempt.log_dirty.done : 0;
+    dirty_bitmap = map_dirty_bitmap(sc->dirty_bitmap, index_mapped, &page);
+    if ( dirty_bitmap == NULL )
+    {
+        domain_unpause(d);
+        return -EFAULT;
+    }
+
     paging_lock(d);
 
     if ( !d->arch.paging.preempt.dom )
@@ -472,18 +529,18 @@ static int paging_log_dirty_op(struct domain *d,
     l4 = paging_map_log_dirty_bitmap(d);
     i4 = d->arch.paging.preempt.log_dirty.i4;
     i3 = d->arch.paging.preempt.log_dirty.i3;
+    i2 = d->arch.paging.preempt.log_dirty.i2;
     pages = d->arch.paging.preempt.log_dirty.done;
 
     for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
     {
         l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
-        for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
+        for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
+             i3++, i2 = 0 )
         {
             l2 = ((l3 && mfn_valid(l3[i3])) ?
                   map_domain_page(mfn_x(l3[i3])) : NULL);
-            for ( i2 = 0;
-                  (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
-                  i2++ )
+            for ( ; (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES); i2++ )
             {
                 unsigned int bytes = PAGE_SIZE;
                 l1 = ((l2 && mfn_valid(l2[i2])) ?
@@ -492,15 +549,28 @@ static int paging_log_dirty_op(struct domain *d,
                     bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
                 if ( likely(peek) )
                 {
-                    if ( (l1 ? copy_to_guest_offset(sc->dirty_bitmap,
-                                                    pages >> 3, (uint8_t *)l1,
-                                                    bytes)
-                             : clear_guest_offset(sc->dirty_bitmap,
-                                                  pages >> 3, bytes)) != 0 )
+                    if ( pages >> (3 + PAGE_SHIFT) !=
+                         index_mapped >> (3 + PAGE_SHIFT) )
                     {
-                        rv = -EFAULT;
-                        goto out;
+                        /* We need to map next page */
+                        d->arch.paging.preempt.log_dirty.i4 = i4;
+                        d->arch.paging.preempt.log_dirty.i3 = i3;
+                        d->arch.paging.preempt.log_dirty.i2 = i2;
+                        d->arch.paging.preempt.log_dirty.done = pages;
+                        d->arch.paging.preempt.dom = current->domain;
+                        d->arch.paging.preempt.op = sc->op;
+                        resuming = 1;
+                        paging_unlock(d);
+                        unmap_dirty_bitmap(dirty_bitmap, page);
+                        goto again;
                     }
+                    ASSERT(((pages >> 3) % PAGE_SIZE) + bytes <= PAGE_SIZE);
+                    if ( l1 )
+                        memcpy(dirty_bitmap + ((pages >> 3) % PAGE_SIZE), l1,
+                               bytes);
+                    else
+                        memset(dirty_bitmap + ((pages >> 3) % PAGE_SIZE), 0,
+                               bytes);
                 }
                 pages += bytes << 3;
                 if ( l1 )
@@ -517,6 +587,7 @@ static int paging_log_dirty_op(struct domain *d,
             {
                 d->arch.paging.preempt.log_dirty.i4 = i4;
                 d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
+                d->arch.paging.preempt.log_dirty.i2 = 0;
                 rv = -ERESTART;
                 break;
             }
@@ -529,6 +600,7 @@ static int paging_log_dirty_op(struct domain *d,
         {
             d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
             d->arch.paging.preempt.log_dirty.i3 = 0;
+            d->arch.paging.preempt.log_dirty.i2 = 0;
             rv = -ERESTART;
         }
         if ( rv )
@@ -558,6 +630,7 @@ static int paging_log_dirty_op(struct domain *d,
     if ( rv )
     {
         /* Never leave the domain paused on real errors. */
+        unmap_dirty_bitmap(dirty_bitmap, page);
         ASSERT(rv == -ERESTART);
         return rv;
     }
@@ -570,12 +643,14 @@ static int paging_log_dirty_op(struct domain *d,
          * paging modes (shadow or hap).  Safe because the domain is paused. */
         d->arch.paging.log_dirty.clean_dirty_bitmap(d);
     }
+    unmap_dirty_bitmap(dirty_bitmap, page);
     domain_unpause(d);
     return rv;
 
  out:
     d->arch.paging.preempt.dom = NULL;
     paging_unlock(d);
+    unmap_dirty_bitmap(dirty_bitmap, page);
     domain_unpause(d);
 
     if ( l1 )
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 3f83e8b..e364a2c 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -202,6 +202,7 @@ struct paging_domain {
                 unsigned long done:PADDR_BITS - PAGE_SHIFT;
                 unsigned long i4:PAGETABLE_ORDER;
                 unsigned long i3:PAGETABLE_ORDER;
+                unsigned long i2:PAGETABLE_ORDER;
             } log_dirty;
         };
     } preempt;
-- 
1.9.5 (Apple Git-50.3)



* Re: [PATCH v7 0/4] xen/pvh: enable migration on PVH Dom0
@ 2015-05-14 15:21 ` Tim Deegan
From: Tim Deegan @ 2015-05-14 15:21 UTC
  To: Roger Pau Monne; +Cc: xen-devel

At 17:06 +0200 on 14 May (1431623173), Roger Pau Monne wrote:
> Changes in this revision include using clear_page instead of memset in 
> vzalloc and minor fixes to paging_log_dirty_op in order to make the page 
> unmapping/mapping more similar to the preempt case.

All Reviewed-by: Tim Deegan <tim@xen.org>

Cheers,

Tim.


* Re: [PATCH v7 2/4] xen/shadow: fix shadow_track_dirty_vram to work on hvm guests
@ 2015-05-22  7:59   ` Jan Beulich
From: Jan Beulich @ 2015-05-22  7:59 UTC
  To: Roger Pau Monne, Tim Deegan; +Cc: Andrew Cooper, xen-devel

>>> On 14.05.15 at 17:06, <roger.pau@citrix.com> wrote:
> @@ -3584,12 +3591,8 @@ int shadow_track_dirty_vram(struct domain *d,
>          rc = -ENODATA;
>      }
>      else if (dirty_vram->last_dirty == -1)
> -    {
>          /* still completely clean, just copy our empty bitmap */
> -        rc = -EFAULT;
> -        if ( copy_to_guest(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size) == 0 )
> -            rc = 0;
> -    }
> +        memcpy(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size);

Btw, having looked at this again in the context of the Coverity issue
it causes - wouldn't this (according to the retained comment) be a
memset()? And if so, in turn a loop over clear_page()?
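
Sketched, that suggestion would read (hypothetical, not code from the
patch):

    /* last_dirty == -1 means the VRAM range is completely clean, so
     * the local buffer (page-granular, from vzalloc) can simply be
     * zeroed page by page instead of copying an all-zero bitmap. */
    for ( i = 0; i < dirty_size; i += PAGE_SIZE )
        clear_page(dirty_bitmap + i);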

Which gets me to a second thing: With the large amounts of data
being pushed through copy_to_guest() here and in its HAP
counterpart, wouldn't it make sense to have a specialized function
avoiding cache thrashing along the lines of clear_page() and
copy_page()?
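
Such a helper might look like the sketch below (hypothetical, loosely
modeled on the movnti-based copy_page_sse2(); not an existing Xen
function):

    /* Bulk copy using non-temporal stores, so that pushing a large
     * dirty bitmap out does not evict useful data from the cache.
     * Assumes dst/src are 8-byte aligned and n is a multiple of 8. */
    static void copy_nocache(void *dst, const void *src, size_t n)
    {
        uint64_t *d = dst;
        const uint64_t *s = src;

        for ( ; n >= 8; n -= 8, d++, s++ )
            asm volatile ( "movnti %1, %0" : "=m" (*d) : "r" (*s) );
        asm volatile ( "sfence" ::: "memory" );
    }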

Jan

