* [PATCH v2 0/2] x86/HVM: hvm_map_guest_frame_rw() adjustments
@ 2015-08-12 14:09 Jan Beulich
  2015-08-12 14:16 ` [PATCH v2 1/2] " Jan Beulich
  2015-08-12 14:19 ` [PATCH v2 2/2] x86/HVM: correct page dirty marking in hvm_map_guest_frame_rw() Jan Beulich
  0 siblings, 2 replies; 13+ messages in thread
From: Jan Beulich @ 2015-08-12 14:09 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser, Wei Liu

1: honor p2m_ram_ro in hvm_map_guest_frame_rw()
2: correct page dirty marking in hvm_map_guest_frame_rw()

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Re-base patch one on top of altp2m series. New patch 2.


* [PATCH v2 1/2] x86/HVM: hvm_map_guest_frame_rw() adjustments
  2015-08-12 14:09 [PATCH v2 0/2] x86/HVM: hvm_map_guest_frame_rw() adjustments Jan Beulich
@ 2015-08-12 14:16 ` Jan Beulich
  2015-08-12 15:44   ` Andrew Cooper
                     ` (2 more replies)
  2015-08-12 14:19 ` [PATCH v2 2/2] x86/HVM: correct page dirty marking in hvm_map_guest_frame_rw() Jan Beulich
  1 sibling, 3 replies; 13+ messages in thread
From: Jan Beulich @ 2015-08-12 14:16 UTC (permalink / raw)
  To: xen-devel
  Cc: Kevin Tian, Wei Liu, suravee.suthikulpanit, Andrew Cooper,
	Aravind Gopalakrishnan, Jun Nakajima, Keir Fraser,
	Boris Ostrovsky

... and its callers.

While all non-nested users are made to fully honor the semantics of that
type, doing so in the nested case seemed insane (if doable at all,
considering VMCS shadowing), and hence there the respective operations
are simply made to fail.

One case not (yet) taken care of is that of a page getting transitioned
to this type after a mapping got established.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Cover new altp2m use too (all previous tags dropped).
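
For illustration, a minimal sketch of how a transient caller is expected to
use the adjusted interface (the function name, gfn, and failure handling are
placeholders, not taken from any particular caller below):

    static bool_t example_write_guest_frame(unsigned long gfn)
    {
        bool_t writable;
        void *va = hvm_map_guest_frame_rw(gfn, 0 /* transient */, &writable);

        if ( va == NULL )
            return 0;                      /* frame could not be mapped */
        if ( !writable )
        {
            /* p2m_ram_ro / discard-write: the mapping is read-only for us,
             * so the intended write must be skipped or the operation
             * failed. */
            hvm_unmap_guest_frame(va, 0);
            return 0;
        }

        /* ... modify the frame through va (already marked dirty) ... */

        hvm_unmap_guest_frame(va, 0);
        return 1;
    }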

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3669,8 +3669,8 @@ int hvm_virtual_to_linear_addr(
 
 /* On non-NULL return, we leave this function holding an additional 
  * ref on the underlying mfn, if any */
-static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable,
-                                   bool_t permanent)
+static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
+                                  bool_t *writable)
 {
     void *map;
     p2m_type_t p2mt;
@@ -3693,7 +3693,12 @@ static void *__hvm_map_guest_frame(unsig
     }
 
     if ( writable )
-        paging_mark_dirty(d, page_to_mfn(page));
+    {
+        if ( !p2m_is_discard_write(p2mt) )
+            paging_mark_dirty(d, page_to_mfn(page));
+        else
+            *writable = 0;
+    }
 
     if ( !permanent )
         return __map_domain_page(page);
@@ -3705,14 +3710,16 @@ static void *__hvm_map_guest_frame(unsig
     return map;
 }
 
-void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent)
+void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent,
+                             bool_t *writable)
 {
-    return __hvm_map_guest_frame(gfn, 1, permanent);
+    *writable = 1;
+    return _hvm_map_guest_frame(gfn, permanent, writable);
 }
 
 void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent)
 {
-    return __hvm_map_guest_frame(gfn, 0, permanent);
+    return _hvm_map_guest_frame(gfn, permanent, NULL);
 }
 
 void hvm_unmap_guest_frame(void *p, bool_t permanent)
@@ -3732,7 +3739,7 @@ void hvm_unmap_guest_frame(void *p, bool
     put_page(mfn_to_page(mfn));
 }
 
-static void *hvm_map_entry(unsigned long va)
+static void *hvm_map_entry(unsigned long va, bool_t *writable)
 {
     unsigned long gfn;
     uint32_t pfec;
@@ -3755,7 +3762,7 @@ static void *hvm_map_entry(unsigned long
     if ( (pfec == PFEC_page_paged) || (pfec == PFEC_page_shared) )
         goto fail;
 
-    v = hvm_map_guest_frame_rw(gfn, 0);
+    v = hvm_map_guest_frame_rw(gfn, 0, writable);
     if ( v == NULL )
         goto fail;
 
@@ -3777,6 +3784,7 @@ static int hvm_load_segment_selector(
     struct segment_register desctab, cs, segr;
     struct desc_struct *pdesc, desc;
     u8 dpl, rpl, cpl;
+    bool_t writable;
     int fault_type = TRAP_invalid_tss;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct vcpu *v = current;
@@ -3813,7 +3821,7 @@ static int hvm_load_segment_selector(
     if ( ((sel & 0xfff8) + 7) > desctab.limit )
         goto fail;
 
-    pdesc = hvm_map_entry(desctab.base + (sel & 0xfff8));
+    pdesc = hvm_map_entry(desctab.base + (sel & 0xfff8), &writable);
     if ( pdesc == NULL )
         goto hvm_map_fail;
 
@@ -3872,6 +3880,7 @@ static int hvm_load_segment_selector(
             break;
         }
     } while ( !(desc.b & 0x100) && /* Ensure Accessed flag is set */
+              writable && /* except if we are to discard writes */
               (cmpxchg(&pdesc->b, desc.b, desc.b | 0x100) != desc.b) );
 
     /* Force the Accessed flag in our local copy. */
@@ -3909,6 +3918,7 @@ void hvm_task_switch(
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct segment_register gdt, tr, prev_tr, segr;
     struct desc_struct *optss_desc = NULL, *nptss_desc = NULL, tss_desc;
+    bool_t otd_writable, ntd_writable;
     unsigned long eflags;
     int exn_raised, rc;
     struct {
@@ -3935,11 +3945,12 @@ void hvm_task_switch(
         goto out;
     }
 
-    optss_desc = hvm_map_entry(gdt.base + (prev_tr.sel & 0xfff8)); 
+    optss_desc = hvm_map_entry(gdt.base + (prev_tr.sel & 0xfff8),
+                               &otd_writable);
     if ( optss_desc == NULL )
         goto out;
 
-    nptss_desc = hvm_map_entry(gdt.base + (tss_sel & 0xfff8)); 
+    nptss_desc = hvm_map_entry(gdt.base + (tss_sel & 0xfff8), &ntd_writable);
     if ( nptss_desc == NULL )
         goto out;
 
@@ -4071,11 +4082,11 @@ void hvm_task_switch(
     v->arch.hvm_vcpu.guest_cr[0] |= X86_CR0_TS;
     hvm_update_guest_cr(v, 0);
 
-    if ( (taskswitch_reason == TSW_iret) ||
-         (taskswitch_reason == TSW_jmp) )
+    if ( (taskswitch_reason == TSW_iret ||
+          taskswitch_reason == TSW_jmp) && otd_writable)
         clear_bit(41, optss_desc); /* clear B flag of old task */
 
-    if ( taskswitch_reason != TSW_iret )
+    if ( taskswitch_reason != TSW_iret && ntd_writable )
         set_bit(41, nptss_desc); /* set B flag of new task */
 
     if ( errcode >= 0 )
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -74,10 +74,20 @@ int nestedsvm_vmcb_map(struct vcpu *v, u
         nv->nv_vvmcxaddr = VMCX_EADDR;
     }
 
-    if (nv->nv_vvmcx == NULL) {
-        nv->nv_vvmcx = hvm_map_guest_frame_rw(vmcbaddr >> PAGE_SHIFT, 1);
-        if (nv->nv_vvmcx == NULL)
+    if ( !nv->nv_vvmcx )
+    {
+        bool_t writable;
+        void *vvmcx = hvm_map_guest_frame_rw(paddr_to_pfn(vmcbaddr), 1,
+                                             &writable);
+
+        if ( !vvmcx )
             return 0;
+        if ( !writable )
+        {
+            hvm_unmap_guest_frame(vvmcx, 1);
+            return 0;
+        }
+        nv->nv_vvmcx = vvmcx;
         nv->nv_vvmcxaddr = vmcbaddr;
     }
 
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1853,14 +1853,17 @@ static int vmx_vcpu_emulate_vmfunc(struc
 
 static bool_t vmx_vcpu_emulate_ve(struct vcpu *v)
 {
-    bool_t rc = 0;
-    ve_info_t *veinfo = gfn_x(vcpu_altp2m(v).veinfo_gfn) != INVALID_GFN ?
-        hvm_map_guest_frame_rw(gfn_x(vcpu_altp2m(v).veinfo_gfn), 0) : NULL;
+    bool_t rc = 0, writable;
+    unsigned long gfn = gfn_x(vcpu_altp2m(v).veinfo_gfn);
+    ve_info_t *veinfo;
 
-    if ( !veinfo )
+    if ( gfn == INVALID_GFN )
         return 0;
 
-    if ( veinfo->semaphore != 0 )
+    veinfo = hvm_map_guest_frame_rw(gfn, 0, &writable);
+    if ( !veinfo )
+        return 0;
+    if ( !writable || veinfo->semaphore != 0 )
         goto out;
 
     rc = 1;
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1618,10 +1618,23 @@ int nvmx_handle_vmptrld(struct cpu_user_
 
     if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
     {
-        nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
-        if ( nvcpu->nv_vvmcx )
-            nvcpu->nv_vvmcxaddr = gpa;
-        if ( !nvcpu->nv_vvmcx ||
+        bool_t writable;
+        void *vvmcx = hvm_map_guest_frame_rw(paddr_to_pfn(gpa), 1, &writable);
+
+        if ( vvmcx )
+        {
+            if ( writable )
+            {
+                nvcpu->nv_vvmcx = vvmcx;
+                nvcpu->nv_vvmcxaddr = gpa;
+            }
+            else
+            {
+                hvm_unmap_guest_frame(vvmcx, 1);
+                vvmcx = NULL;
+            }
+        }
+        if ( !vvmcx ||
              !map_io_bitmap_all(v) ||
              !_map_msr_bitmap(v) )
         {
@@ -1675,13 +1688,10 @@ int nvmx_handle_vmclear(struct cpu_user_
     if ( rc != X86EMUL_OKAY )
         return rc;
 
+    BUILD_BUG_ON(X86EMUL_OKAY != VMSUCCEED); /* rc = VMSUCCEED; */
     if ( gpa & 0xfff )
-    {
-        vmreturn(regs, VMFAIL_INVALID);
-        return X86EMUL_OKAY;
-    }
-    
-    if ( gpa == nvcpu->nv_vvmcxaddr ) 
+        rc = VMFAIL_INVALID;
+    else if ( gpa == nvcpu->nv_vvmcxaddr )
     {
         if ( cpu_has_vmx_vmcs_shadowing )
             nvmx_clear_vmcs_pointer(v, nvcpu->nv_vvmcx);
@@ -1692,14 +1702,22 @@ int nvmx_handle_vmclear(struct cpu_user_
     else 
     {
         /* Even if this VMCS isn't the current one, we must clear it. */
-        vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 0);
+        bool_t writable;
+
+        vvmcs = hvm_map_guest_frame_rw(paddr_to_pfn(gpa), 0, &writable);
         if ( vvmcs ) 
-            clear_vvmcs_launched(&nvmx->launched_list,
-                domain_page_map_to_mfn(vvmcs));
-        hvm_unmap_guest_frame(vvmcs, 0);
+        {
+            if ( writable )
+                clear_vvmcs_launched(&nvmx->launched_list,
+                                     domain_page_map_to_mfn(vvmcs));
+            else
+                rc = VMFAIL_VALID;
+            hvm_unmap_guest_frame(vvmcs, 0);
+        }
     }
 
-    vmreturn(regs, VMSUCCEED);
+    vmreturn(regs, rc);
+
     return X86EMUL_OKAY;
 }
 
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -440,7 +440,8 @@ int hvm_virtual_to_linear_addr(
     unsigned int addr_size,
     unsigned long *linear_addr);
 
-void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent);
+void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent,
+                             bool_t *writable);
 void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent);
 void hvm_unmap_guest_frame(void *p, bool_t permanent);


* [PATCH v2 2/2] x86/HVM: correct page dirty marking in hvm_map_guest_frame_rw()
  2015-08-12 14:09 [PATCH v2 0/2] x86/HVM: hvm_map_guest_frame_rw() adjustments Jan Beulich
  2015-08-12 14:16 ` [PATCH v2 1/2] " Jan Beulich
@ 2015-08-12 14:19 ` Jan Beulich
  2015-08-12 15:13   ` Andrew Cooper
  1 sibling, 1 reply; 13+ messages in thread
From: Jan Beulich @ 2015-08-12 14:19 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper, Keir Fraser, Wei Liu


Rather than dirtying a page when establishing a (permanent) mapping,
dirty it when the page gets unmapped, or - if still mapped - on the
final iteration of a save operation. (Transient mappings continue to
get dirtied upon getting mapped, to avoid the overhead of tracking.)

Signed-off-by: Jan Beulich <jbeulich@suse.com>
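
For illustration, a sketch of the resulting lifecycle of a permanent writable
mapping (the function name and gfn are placeholders; the comments describe
the behaviour introduced by the patch below):

    static void example_permanent_mapping(unsigned long gfn)
    {
        bool_t writable;
        void *va = hvm_map_guest_frame_rw(gfn, 1 /* permanent */, &writable);

        if ( va == NULL )
            return;
        if ( !writable )
        {
            hvm_unmap_guest_frame(va, 1);
            return;
        }

        /* The page is now tracked on the domain's write_map list, but has
         * not been marked dirty yet. */

        /* ... arbitrarily many writes through va, possibly much later ... */

        /* The page gets marked dirty here, at unmap time - or, while still
         * mapped, by hvm_mapped_guest_frames_mark_dirty() on the final
         * iteration of a save operation. */
        hvm_unmap_guest_frame(va, 1);
    }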

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1556,6 +1556,8 @@ int hvm_domain_initialise(struct domain 
     INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.list);
     spin_lock_init(&d->arch.hvm_domain.irq_lock);
     spin_lock_init(&d->arch.hvm_domain.uc_lock);
+    spin_lock_init(&d->arch.hvm_domain.write_map.lock);
+    INIT_LIST_HEAD(&d->arch.hvm_domain.write_map.list);
 
     hvm_init_cacheattr_region_list(d);
 
@@ -3667,6 +3669,11 @@ int hvm_virtual_to_linear_addr(
     return 1;
 }
 
+struct hvm_write_map {
+    struct list_head list;
+    struct page_info *page;
+};
+
 /* On non-NULL return, we leave this function holding an additional 
  * ref on the underlying mfn, if any */
 static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
@@ -3694,15 +3701,30 @@ static void *_hvm_map_guest_frame(unsign
 
     if ( writable )
     {
-        if ( !p2m_is_discard_write(p2mt) )
-            paging_mark_dirty(d, page_to_mfn(page));
-        else
+        if ( unlikely(p2m_is_discard_write(p2mt)) )
             *writable = 0;
+        else if ( !permanent )
+            paging_mark_dirty(d, page_to_mfn(page));
     }
 
     if ( !permanent )
         return __map_domain_page(page);
 
+    if ( writable && *writable )
+    {
+        struct hvm_write_map *track = xmalloc(struct hvm_write_map);
+
+        if ( !track )
+        {
+            put_page(page);
+            return NULL;
+        }
+        track->page = page;
+        spin_lock(&d->arch.hvm_domain.write_map.lock);
+        list_add_tail(&track->list, &d->arch.hvm_domain.write_map.list);
+        spin_unlock(&d->arch.hvm_domain.write_map.lock);
+    }
+
     map = __map_domain_page_global(page);
     if ( !map )
         put_page(page);
@@ -3725,18 +3747,45 @@ void *hvm_map_guest_frame_ro(unsigned lo
 void hvm_unmap_guest_frame(void *p, bool_t permanent)
 {
     unsigned long mfn;
+    struct page_info *page;
 
     if ( !p )
         return;
 
     mfn = domain_page_map_to_mfn(p);
+    page = mfn_to_page(mfn);
 
     if ( !permanent )
         unmap_domain_page(p);
     else
+    {
+        struct domain *d = page_get_owner(page);
+        struct hvm_write_map *track;
+
         unmap_domain_page_global(p);
+        spin_lock(&d->arch.hvm_domain.write_map.lock);
+        list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
+            if ( track->page == page )
+            {
+                paging_mark_dirty(d, mfn);
+                list_del(&track->list);
+                xfree(track);
+                break;
+            }
+        spin_unlock(&d->arch.hvm_domain.write_map.lock);
+    }
+
+    put_page(page);
+}
+
+void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
+{
+    struct hvm_write_map *track;
 
-    put_page(mfn_to_page(mfn));
+    spin_lock(&d->arch.hvm_domain.write_map.lock);
+    list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
+        paging_mark_dirty(d, page_to_mfn(track->page));
+    spin_unlock(&d->arch.hvm_domain.write_map.lock);
 }
 
 static void *hvm_map_entry(unsigned long va, bool_t *writable)
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -29,6 +29,7 @@
 #include <asm/hvm/nestedhvm.h>
 #include <xen/numa.h>
 #include <xsm/xsm.h>
+#include <public/sched.h> /* SHUTDOWN_suspend */
 
 #include "mm-locks.h"
 
@@ -422,6 +423,14 @@ static int paging_log_dirty_op(struct do
 
     if ( !resuming )
     {
+        /*
+         * Mark dirty all currently write-mapped pages on the final iteration
+         * of a HVM save operation.
+         */
+        if ( has_hvm_container_domain(d) && d->is_shut_down &&
+             d->shutdown_code == SHUTDOWN_suspend )
+            hvm_mapped_guest_frames_mark_dirty(d);
+
         domain_pause(d);
 
         /*
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -145,6 +145,12 @@ struct hvm_domain {
 
     unsigned long *io_bitmap;
 
+    /* List of permanently write-mapped pages. */
+    struct {
+        spinlock_t lock;
+        struct list_head list;
+    } write_map;
+
     union {
         struct vmx_domain vmx;
         struct svm_domain svm;
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -444,6 +444,7 @@ void *hvm_map_guest_frame_rw(unsigned lo
                              bool_t *writable);
 void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent);
 void hvm_unmap_guest_frame(void *p, bool_t permanent);
+void hvm_mapped_guest_frames_mark_dirty(struct domain *);
 
 static inline void hvm_set_info_guest(struct vcpu *v)
 {




* Re: [PATCH v2 2/2] x86/HVM: correct page dirty marking in hvm_map_guest_frame_rw()
  2015-08-12 14:19 ` [PATCH v2 2/2] x86/HVM: correct page dirty marking in hvm_map_guest_frame_rw() Jan Beulich
@ 2015-08-12 15:13   ` Andrew Cooper
  2015-08-12 15:26     ` Jan Beulich
  0 siblings, 1 reply; 13+ messages in thread
From: Andrew Cooper @ 2015-08-12 15:13 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: George Dunlap, Keir Fraser, Wei Liu

On 12/08/15 15:19, Jan Beulich wrote:
> Rather than dirtying a page when establishing a (permanent) mapping,
> dirty it when the page gets unmapped, or - if still mapped - on the
> final iteration of a save operation. (Transient mappings continue to
> get dirtied upon getting mapped, to avoid the overhead of tracking.)
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -1556,6 +1556,8 @@ int hvm_domain_initialise(struct domain 
>      INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.list);
>      spin_lock_init(&d->arch.hvm_domain.irq_lock);
>      spin_lock_init(&d->arch.hvm_domain.uc_lock);
> +    spin_lock_init(&d->arch.hvm_domain.write_map.lock);
> +    INIT_LIST_HEAD(&d->arch.hvm_domain.write_map.list);
>  
>      hvm_init_cacheattr_region_list(d);
>  
> @@ -3667,6 +3669,11 @@ int hvm_virtual_to_linear_addr(
>      return 1;
>  }
>  
> +struct hvm_write_map {
> +    struct list_head list;
> +    struct page_info *page;
> +};
> +
>  /* On non-NULL return, we leave this function holding an additional 
>   * ref on the underlying mfn, if any */
>  static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
> @@ -3694,15 +3701,30 @@ static void *_hvm_map_guest_frame(unsign
>  
>      if ( writable )
>      {
> -        if ( !p2m_is_discard_write(p2mt) )
> -            paging_mark_dirty(d, page_to_mfn(page));
> -        else
> +        if ( unlikely(p2m_is_discard_write(p2mt)) )
>              *writable = 0;
> +        else if ( !permanent )
> +            paging_mark_dirty(d, page_to_mfn(page));
>      }
>  
>      if ( !permanent )
>          return __map_domain_page(page);
>  
> +    if ( writable && *writable )
> +    {
> +        struct hvm_write_map *track = xmalloc(struct hvm_write_map);
> +
> +        if ( !track )
> +        {
> +            put_page(page);
> +            return NULL;
> +        }
> +        track->page = page;
> +        spin_lock(&d->arch.hvm_domain.write_map.lock);
> +        list_add_tail(&track->list, &d->arch.hvm_domain.write_map.list);

Map and unmap of non-permanent pages will happen in a stacked fashion,
so putting non-permanent pages at the head of the list will be more
efficient when walking the list for removal.

> +        spin_unlock(&d->arch.hvm_domain.write_map.lock);
> +    }
> +
>      map = __map_domain_page_global(page);
>      if ( !map )
>          put_page(page);
> @@ -3725,18 +3747,45 @@ void *hvm_map_guest_frame_ro(unsigned lo
>  void hvm_unmap_guest_frame(void *p, bool_t permanent)
>  {
>      unsigned long mfn;
> +    struct page_info *page;
>  
>      if ( !p )
>          return;
>  
>      mfn = domain_page_map_to_mfn(p);
> +    page = mfn_to_page(mfn);
>  
>      if ( !permanent )
>          unmap_domain_page(p);
>      else
> +    {
> +        struct domain *d = page_get_owner(page);
> +        struct hvm_write_map *track;
> +
>          unmap_domain_page_global(p);
> +        spin_lock(&d->arch.hvm_domain.write_map.lock);
> +        list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
> +            if ( track->page == page )
> +            {
> +                paging_mark_dirty(d, mfn);
> +                list_del(&track->list);
> +                xfree(track);
> +                break;
> +            }
> +        spin_unlock(&d->arch.hvm_domain.write_map.lock);
> +    }
> +
> +    put_page(page);
> +}
> +
> +void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
> +{
> +    struct hvm_write_map *track;
>  
> -    put_page(mfn_to_page(mfn));
> +    spin_lock(&d->arch.hvm_domain.write_map.lock);
> +    list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
> +        paging_mark_dirty(d, page_to_mfn(track->page));

This is potentially a long running operation.  It might be easier to
ASSERT() an upper limit of mapped pages than to make this restartable.

> +    spin_unlock(&d->arch.hvm_domain.write_map.lock);
>  }
>  
>  static void *hvm_map_entry(unsigned long va, bool_t *writable)
> --- a/xen/arch/x86/mm/paging.c
> +++ b/xen/arch/x86/mm/paging.c
> @@ -29,6 +29,7 @@
>  #include <asm/hvm/nestedhvm.h>
>  #include <xen/numa.h>
>  #include <xsm/xsm.h>
> +#include <public/sched.h> /* SHUTDOWN_suspend */
>  
>  #include "mm-locks.h"
>  
> @@ -422,6 +423,14 @@ static int paging_log_dirty_op(struct do
>  
>      if ( !resuming )
>      {
> +        /*
> +         * Mark dirty all currently write-mapped pages on the final iteration
> +         * of a HVM save operation.
> +         */
> +        if ( has_hvm_container_domain(d) && d->is_shut_down &&
> +             d->shutdown_code == SHUTDOWN_suspend )
> +            hvm_mapped_guest_frames_mark_dirty(d);

I am not sure whether this is useful.  There are situations such as
memory snapshot where it is insufficient, as the domain doesn't suspend.

It would probably be better to hook this off a specific request from the
toolstack, as the migration code is in a far better position to know
whether this information is needed or can be deferred to the next iteration.

~Andrew


* Re: [PATCH v2 2/2] x86/HVM: correct page dirty marking in hvm_map_guest_frame_rw()
  2015-08-12 15:13   ` Andrew Cooper
@ 2015-08-12 15:26     ` Jan Beulich
  2015-08-12 17:24       ` Andrew Cooper
  0 siblings, 1 reply; 13+ messages in thread
From: Jan Beulich @ 2015-08-12 15:26 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: George Dunlap, xen-devel, Keir Fraser, Wei Liu

>>> On 12.08.15 at 17:13, <andrew.cooper3@citrix.com> wrote:
> On 12/08/15 15:19, Jan Beulich wrote:
>> +    if ( writable && *writable )
>> +    {
>> +        struct hvm_write_map *track = xmalloc(struct hvm_write_map);
>> +
>> +        if ( !track )
>> +        {
>> +            put_page(page);
>> +            return NULL;
>> +        }
>> +        track->page = page;
>> +        spin_lock(&d->arch.hvm_domain.write_map.lock);
>> +        list_add_tail(&track->list, &d->arch.hvm_domain.write_map.list);
> 
> Map and unmap of non-permanent pages will happen in a stacked fashion,
> so putting non-permanent pages at the head of the list will be more
> efficient when walking the list for removal.

But this is dealing with permanent mappings.

>> +void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
>> +{
>> +    struct hvm_write_map *track;
>>  
>> -    put_page(mfn_to_page(mfn));
>> +    spin_lock(&d->arch.hvm_domain.write_map.lock);
>> +    list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
>> +        paging_mark_dirty(d, page_to_mfn(track->page));
> 
> This is potentially a long running operation.  It might be easier to
> ASSERT() an upper limit of mapped pages than to make this restartable.

I don't think I can come up with a sensible upper limit - do you have a
good suggestion (including a rationale)? Also I don't view this as an
immediate problem - as long as we're limiting HVM guests to 128 vCPU-s
we're not going to see many more than 128 such mappings, and even
those only from nested HVM code. (So to answer my question - 256
would seem a reasonable limit for now.)
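
A hypothetical sketch of such a check, not part of the posted patch (the 256
bound is merely the figure from above, counting entries under the lock is
just one possible way to enforce it, and d and track are the locals of
_hvm_map_guest_frame() from patch 2):

    unsigned int count = 0;
    struct hvm_write_map *entry;

    spin_lock(&d->arch.hvm_domain.write_map.lock);
    list_for_each_entry(entry, &d->arch.hvm_domain.write_map.list, list)
        ++count;
    ASSERT(count < 256);
    list_add_tail(&track->list, &d->arch.hvm_domain.write_map.list);
    spin_unlock(&d->arch.hvm_domain.write_map.lock);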

>> --- a/xen/arch/x86/mm/paging.c
>> +++ b/xen/arch/x86/mm/paging.c
>> @@ -29,6 +29,7 @@
>>  #include <asm/hvm/nestedhvm.h>
>>  #include <xen/numa.h>
>>  #include <xsm/xsm.h>
>> +#include <public/sched.h> /* SHUTDOWN_suspend */
>>  
>>  #include "mm-locks.h"
>>  
>> @@ -422,6 +423,14 @@ static int paging_log_dirty_op(struct do
>>  
>>      if ( !resuming )
>>      {
>> +        /*
>> +         * Mark dirty all currently write-mapped pages on the final iteration
>> +         * of a HVM save operation.
>> +         */
>> +        if ( has_hvm_container_domain(d) && d->is_shut_down &&
>> +             d->shutdown_code == SHUTDOWN_suspend )
>> +            hvm_mapped_guest_frames_mark_dirty(d);
> 
> I am not sure whether this is useful.  There are situations such as
> memory snapshot where it is insufficient, as the domain doesn't suspend.

Perhaps the condition could be refined then? And then - isn't
memory snapshot what Remus does (which I checked does a
suspend in the tool stack)?

> It would probably be better to hook this off a specific request from the
> toolstack, as the migration code is in a far better position to know
> whether this information is needed or can be deferred to the next iteration.

Which next iteration (when we're talking about the final one)?

I considered tool stack involvement, but would like to go that
route only if unavoidable.

Jan


* Re: [PATCH v2 1/2] x86/HVM: hvm_map_guest_frame_rw() adjustments
  2015-08-12 14:16 ` [PATCH v2 1/2] " Jan Beulich
@ 2015-08-12 15:44   ` Andrew Cooper
  2015-08-13  9:22   ` Wei Liu
  2015-08-14  7:28   ` Tian, Kevin
  2 siblings, 0 replies; 13+ messages in thread
From: Andrew Cooper @ 2015-08-12 15:44 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Kevin Tian, Wei Liu, suravee.suthikulpanit,
	Aravind Gopalakrishnan, Jun Nakajima, Keir Fraser,
	Boris Ostrovsky

On 12/08/15 15:16, Jan Beulich wrote:
> ... and its callers.
>
> While all non-nested users are made fully honor the semantics of that
> type, doing so in the nested case seemed insane (if doable at all,
> considering VMCS shadowing), and hence there the respective operations
> are simply made fail.
>
> One case not (yet) taken care of is that of a page getting transitioned
> to this type after a mapping got established.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>, with one style
correction...

> @@ -4071,11 +4082,11 @@ void hvm_task_switch(
>      v->arch.hvm_vcpu.guest_cr[0] |= X86_CR0_TS;
>      hvm_update_guest_cr(v, 0);
>  
> -    if ( (taskswitch_reason == TSW_iret) ||
> -         (taskswitch_reason == TSW_jmp) )
> +    if ( (taskswitch_reason == TSW_iret ||
> +          taskswitch_reason == TSW_jmp) && otd_writable)

Space.

~Andrew


* Re: [PATCH v2 2/2] x86/HVM: correct page dirty marking in hvm_map_guest_frame_rw()
  2015-08-12 15:26     ` Jan Beulich
@ 2015-08-12 17:24       ` Andrew Cooper
  2015-08-13  6:32         ` Jan Beulich
  0 siblings, 1 reply; 13+ messages in thread
From: Andrew Cooper @ 2015-08-12 17:24 UTC (permalink / raw)
  To: Jan Beulich; +Cc: George Dunlap, xen-devel, Keir Fraser, Wei Liu

On 12/08/15 16:26, Jan Beulich wrote:
>>>> On 12.08.15 at 17:13, <andrew.cooper3@citrix.com> wrote:
>> On 12/08/15 15:19, Jan Beulich wrote:
>>> +    if ( writable && *writable )
>>> +    {
>>> +        struct hvm_write_map *track = xmalloc(struct hvm_write_map);
>>> +
>>> +        if ( !track )
>>> +        {
>>> +            put_page(page);
>>> +            return NULL;
>>> +        }
>>> +        track->page = page;
>>> +        spin_lock(&d->arch.hvm_domain.write_map.lock);
>>> +        list_add_tail(&track->list, &d->arch.hvm_domain.write_map.list);
>> Map and unmap of non-permanent pages will happen in a stacked fashion,
>> so putting non-permanent pages at the head of the list will be more
>> efficient when walking the list for removal.
> But this is dealing with permanent mappings.

So it does.  Sorry for the noise.

>
>>> +void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
>>> +{
>>> +    struct hvm_write_map *track;
>>>  
>>> -    put_page(mfn_to_page(mfn));
>>> +    spin_lock(&d->arch.hvm_domain.write_map.lock);
>>> +    list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
>>> +        paging_mark_dirty(d, page_to_mfn(track->page));
>> This is potentially a long running operation.  It might be easier to
>> ASSERT() an upper limit of mapped pages than to make this restartable.
> I don't think I can come up with a sensible upper limit - do you have a
> good suggestion (including a rationale)? Also I don't view this as an
> immediate problem - as long as we're limiting HVM guests to 128 vCPU-s
> we're not going to see many more than 128 such mappings, and even
> those only from nested HVM code. (So to answer my question - 256
> would seem a reasonable limit for now.)
>
>>> --- a/xen/arch/x86/mm/paging.c
>>> +++ b/xen/arch/x86/mm/paging.c
>>> @@ -29,6 +29,7 @@
>>>  #include <asm/hvm/nestedhvm.h>
>>>  #include <xen/numa.h>
>>>  #include <xsm/xsm.h>
>>> +#include <public/sched.h> /* SHUTDOWN_suspend */
>>>  
>>>  #include "mm-locks.h"
>>>  
>>> @@ -422,6 +423,14 @@ static int paging_log_dirty_op(struct do
>>>  
>>>      if ( !resuming )
>>>      {
>>> +        /*
>>> +         * Mark dirty all currently write-mapped pages on the final iteration
>>> +         * of a HVM save operation.
>>> +         */
>>> +        if ( has_hvm_container_domain(d) && d->is_shut_down &&
>>> +             d->shutdown_code == SHUTDOWN_suspend )
>>> +            hvm_mapped_guest_frames_mark_dirty(d);
>> I am not sure whether this is useful.  There are situations such as
>> memory snapshot where it is insufficient, as the domain doesn't suspend.
> Perhaps the condition could be refined then? And then - isn't
> memory snapshot what Remus does (which I checked does a
> suspend in the tool stack)?

XenServer live memory snapshots of Windows VMs use a Windows API to
quiesce IO, pause the domain, perform a non-live save, and then unpause
the domain.

Such an action would want these bits in the bitmap, but would not
match those conditions.

>
>> It would probably be better to hook this off a specific request from the
>> toolstack, as the migration code is in a far better position to know
>> whether this information is needed or can be deferred to the next iteration.
> Which next iteration (when we're talking about the final one)?
>
> I considered tool stack involvement, but would like to go that
> route only if unavoidable.

It is wrong for Xen to guess.  The toolstack should explicitly ask for
them when it wants them.

I have just written my slides for Seattle, which cover some of the
outstanding issues with regard to guests and logdirty.  As migration
with nested virt doesn't function at all, fixing these entries in the
logdirty bitmap is not urgent if you don't fancy extending/tweaking
xen_domctl_shadow_op.

~Andrew


* Re: [PATCH v2 2/2] x86/HVM: correct page dirty marking in hvm_map_guest_frame_rw()
  2015-08-12 17:24       ` Andrew Cooper
@ 2015-08-13  6:32         ` Jan Beulich
  0 siblings, 0 replies; 13+ messages in thread
From: Jan Beulich @ 2015-08-13  6:32 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: George Dunlap, xen-devel, Keir Fraser, Wei Liu

>>> On 12.08.15 at 19:24, <andrew.cooper3@citrix.com> wrote:
> On 12/08/15 16:26, Jan Beulich wrote:
>>>>> On 12.08.15 at 17:13, <andrew.cooper3@citrix.com> wrote:
>>> On 12/08/15 15:19, Jan Beulich wrote:
>>>> @@ -422,6 +423,14 @@ static int paging_log_dirty_op(struct do
>>>>  
>>>>      if ( !resuming )
>>>>      {
>>>> +        /*
>>>> +         * Mark dirty all currently write-mapped pages on the final iteration
>>>> +         * of a HVM save operation.
>>>> +         */
>>>> +        if ( has_hvm_container_domain(d) && d->is_shut_down &&
>>>> +             d->shutdown_code == SHUTDOWN_suspend )
>>>> +            hvm_mapped_guest_frames_mark_dirty(d);
>>> I am not sure whether this is useful.  There are situations such as
>>> memory snapshot where it is insufficient, as the domain doesn't suspend.
>> Perhaps the condition could be refined then? And then - isn't
>> memory snapshot what Remus does (which I checked does a
>> suspend in the tool stack)?
> 
> XenServer live memory snapshots of windows VMs uses a windows API to
> quiesce IO, pauses the domain, performs a non-live save, then unpauses
> the domain.
> 
> Such an action would want the these bits in the bitmap, but would not
> match those conditions.

As said - the conditions could be adjusted (e.g. to also include
tool-stack-paused domains).
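
A hypothetical sketch of such a refined condition (the tool-stack-pause
predicate is a placeholder, not a field or function taken from the source;
the remaining identifiers are those used in patch 2):

    if ( has_hvm_container_domain(d) &&
         ((d->is_shut_down && d->shutdown_code == SHUTDOWN_suspend) ||
          domain_paused_by_toolstack(d) /* placeholder predicate */) )
        hvm_mapped_guest_frames_mark_dirty(d);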

>>> It would probably be better to hook this off a specific request from the
>>> toolstack, as the migration code is in a far better position to know
>>> whether this information is needed or can be deferred to the next iteration.
>> Which next iteration (when we're talking about the final one)?
>>
>> I considered tool stack involvement, but would like to go that
>> route only if unavoidable.
> 
> It is wrong for Xen to guess.  The toolstack should explicitly ask for
> them when it wants them.
> 
> I have just written my slides for Seattle, which cover some of the
> outstanding issues with regards to guests and logdirty.  As migration
> with nested virt doesn't function at all, fixing these entries in the
> logdirty bitmap is not urgent if you don't fancy extending/tweaking
> xen_domctl_shadow_op.

Agreed - while I'd like patch 1 to go in for 4.6, this one can wait.

Jan


* Re: [PATCH v2 1/2] x86/HVM: hvm_map_guest_frame_rw() adjustments
  2015-08-12 14:16 ` [PATCH v2 1/2] " Jan Beulich
  2015-08-12 15:44   ` Andrew Cooper
@ 2015-08-13  9:22   ` Wei Liu
  2015-08-13  9:33     ` Jan Beulich
  2015-08-14  7:28   ` Tian, Kevin
  2 siblings, 1 reply; 13+ messages in thread
From: Wei Liu @ 2015-08-13  9:22 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Kevin Tian, Wei Liu, Jun Nakajima, Andrew Cooper,
	Aravind Gopalakrishnan, suravee.suthikulpanit, xen-devel,
	Boris Ostrovsky, Keir Fraser

On Wed, Aug 12, 2015 at 08:16:33AM -0600, Jan Beulich wrote:
> ... and its callers.
> 
> While all non-nested users are made fully honor the semantics of that
> type, doing so in the nested case seemed insane (if doable at all,
> considering VMCS shadowing), and hence there the respective operations
> are simply made fail.
> 
> One case not (yet) taken care of is that of a page getting transitioned
> to this type after a mapping got established.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Is this a bug fix? I think so, but the title only says adjustment so I'd
better be sure.

Wei.


* Re: [PATCH v2 1/2] x86/HVM: hvm_map_guest_frame_rw() adjustments
  2015-08-13  9:22   ` Wei Liu
@ 2015-08-13  9:33     ` Jan Beulich
  2015-08-13  9:35       ` Wei Liu
  0 siblings, 1 reply; 13+ messages in thread
From: Jan Beulich @ 2015-08-13  9:33 UTC (permalink / raw)
  To: Wei Liu
  Cc: Kevin Tian, Keir Fraser, suravee.suthikulpanit, Andrew Cooper,
	Aravind Gopalakrishnan, Jun Nakajima, xen-devel, Boris Ostrovsky

>>> On 13.08.15 at 11:22, <wei.liu2@citrix.com> wrote:
> On Wed, Aug 12, 2015 at 08:16:33AM -0600, Jan Beulich wrote:
>> ... and its callers.
>> 
>> While all non-nested users are made fully honor the semantics of that
>> type, doing so in the nested case seemed insane (if doable at all,
>> considering VMCS shadowing), and hence there the respective operations
>> are simply made fail.
>> 
>> One case not (yet) taken care of is that of a page getting transitioned
>> to this type after a mapping got established.
>> 
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> Is this a bug fix? I think so, but the title only says adjustment so I'd
> better be sure.

Yes, it is (I had hoped that the description would be sufficient to
tell).

Jan


* Re: [PATCH v2 1/2] x86/HVM: hvm_map_guest_frame_rw() adjustments
  2015-08-13  9:33     ` Jan Beulich
@ 2015-08-13  9:35       ` Wei Liu
  2015-08-13 10:09         ` Jan Beulich
  0 siblings, 1 reply; 13+ messages in thread
From: Wei Liu @ 2015-08-13  9:35 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Kevin Tian, Wei Liu, suravee.suthikulpanit, Andrew Cooper,
	Aravind Gopalakrishnan, Jun Nakajima, xen-devel, Boris Ostrovsky,
	Keir Fraser

On Thu, Aug 13, 2015 at 03:33:42AM -0600, Jan Beulich wrote:
> >>> On 13.08.15 at 11:22, <wei.liu2@citrix.com> wrote:
> > On Wed, Aug 12, 2015 at 08:16:33AM -0600, Jan Beulich wrote:
> >> ... and its callers.
> >> 
> >> While all non-nested users are made fully honor the semantics of that
> >> type, doing so in the nested case seemed insane (if doable at all,
> >> considering VMCS shadowing), and hence there the respective operations
> >> are simply made fail.
> >> 
> >> One case not (yet) taken care of is that of a page getting transitioned
> >> to this type after a mapping got established.
> >> 
> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> > 
> > Is this a bug fix? I think so, but the title only says adjustment so I'd
> > better be sure.
> 
> Yes, it is (I had hoped that the description would be sufficient to
> tell).
> 

Thanks for confirmation.

Release-acked-by: Wei Liu <wei.liu2@citrix.com>


> Jan


* Re: [PATCH v2 1/2] x86/HVM: hvm_map_guest_frame_rw() adjustments
  2015-08-13  9:35       ` Wei Liu
@ 2015-08-13 10:09         ` Jan Beulich
  0 siblings, 0 replies; 13+ messages in thread
From: Jan Beulich @ 2015-08-13 10:09 UTC (permalink / raw)
  To: Wei Liu
  Cc: Kevin Tian, Keir Fraser, suravee.suthikulpanit, Andrew Cooper,
	Aravind Gopalakrishnan, Jun Nakajima, xen-devel, Boris Ostrovsky

>>> On 13.08.15 at 11:35, <wei.liu2@citrix.com> wrote:
> On Thu, Aug 13, 2015 at 03:33:42AM -0600, Jan Beulich wrote:
>> >>> On 13.08.15 at 11:22, <wei.liu2@citrix.com> wrote:
>> > On Wed, Aug 12, 2015 at 08:16:33AM -0600, Jan Beulich wrote:
>> >> ... and its callers.
>> >> 
>> >> While all non-nested users are made fully honor the semantics of that
>> >> type, doing so in the nested case seemed insane (if doable at all,
>> >> considering VMCS shadowing), and hence there the respective operations
>> >> are simply made fail.
>> >> 
>> >> One case not (yet) taken care of is that of a page getting transitioned
>> >> to this type after a mapping got established.
>> >> 
>> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> > 
>> > Is this a bug fix? I think so, but the title only says adjustment so I'd
>> > better be sure.
>> 
>> Yes, it is (I had hoped that the description would be sufficient to
>> tell).
> 
> Thanks for confirmation.

And I only now realize that I screwed up the title - what is here
was what 0/2 had; this patch really is supposed to be named
"x86/HVM: honor p2m_ram_ro in hvm_map_guest_frame_rw()"
just like its v1 was. I'm sorry for that.

Jan


* Re: [PATCH v2 1/2] x86/HVM: hvm_map_guest_frame_rw() adjustments
  2015-08-12 14:16 ` [PATCH v2 1/2] " Jan Beulich
  2015-08-12 15:44   ` Andrew Cooper
  2015-08-13  9:22   ` Wei Liu
@ 2015-08-14  7:28   ` Tian, Kevin
  2 siblings, 0 replies; 13+ messages in thread
From: Tian, Kevin @ 2015-08-14  7:28 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Wei Liu, suravee.suthikulpanit, Andrew Cooper,
	Aravind Gopalakrishnan, Nakajima, Jun, Keir Fraser,
	Boris Ostrovsky

> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: Wednesday, August 12, 2015 10:17 PM
> 
> ... and its callers.
> 
> While all non-nested users are made fully honor the semantics of that
> type, doing so in the nested case seemed insane (if doable at all,
> considering VMCS shadowing), and hence there the respective operations
> are simply made fail.
> 
> One case not (yet) taken care of is that of a page getting transitioned
> to this type after a mapping got established.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Kevin Tian <kevin.tian@intel.com>



Thread overview: 13 messages
2015-08-12 14:09 [PATCH v2 0/2] x86/HVM: hvm_map_guest_frame_rw() adjustments Jan Beulich
2015-08-12 14:16 ` [PATCH v2 1/2] " Jan Beulich
2015-08-12 15:44   ` Andrew Cooper
2015-08-13  9:22   ` Wei Liu
2015-08-13  9:33     ` Jan Beulich
2015-08-13  9:35       ` Wei Liu
2015-08-13 10:09         ` Jan Beulich
2015-08-14  7:28   ` Tian, Kevin
2015-08-12 14:19 ` [PATCH v2 2/2] x86/HVM: correct page dirty marking in hvm_map_guest_frame_rw() Jan Beulich
2015-08-12 15:13   ` Andrew Cooper
2015-08-12 15:26     ` Jan Beulich
2015-08-12 17:24       ` Andrew Cooper
2015-08-13  6:32         ` Jan Beulich
