All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/2] x86/paging: Update paging_mark_dirty() to use mfn_t
@ 2016-12-14 14:26 Andrew Cooper
  2016-12-14 14:26 ` [PATCH 2/2] x86/paging: Rename paging_mark_pfn_dirty() and use pfn_t Andrew Cooper
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Andrew Cooper @ 2016-12-14 14:26 UTC (permalink / raw)
  To: Xen-devel
  Cc: Stefano Stabellini, George Dunlap, Andrew Cooper, Tim Deegan,
	Julien Grall, Jan Beulich

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: George Dunlap <george.dunlap@eu.citrix.com>
CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
CC: Stefano Stabellini <sstabellini@kernel.org>
CC: Julien Grall <julien.grall@arm.com>

The one use of paging_mark_dirty() in common/tmem shows that TMEM currently
won't compile for ARM.

I considered introducing a common prototype in include/xen/paging.h which can
be overridden by include/asm/paging.h, which would also allow the removal of
gnttab_mark_dirty() which seems to exist only to stub out other common uses.

If this is considered a good idea, I'd prefer to submit a separate patch than
to merge it into this one.
---
 xen/arch/x86/debug.c              |  2 +-
 xen/arch/x86/hvm/hvm.c            | 12 ++++++------
 xen/arch/x86/hvm/ioreq.c          |  2 +-
 xen/arch/x86/mm.c                 | 16 ++++++++--------
 xen/arch/x86/mm/guest_walk.c      |  8 ++++----
 xen/arch/x86/mm/mem_sharing.c     |  2 +-
 xen/arch/x86/mm/p2m-pod.c         |  2 +-
 xen/arch/x86/mm/paging.c          |  5 +----
 xen/arch/x86/mm/shadow/common.c   |  6 +++---
 xen/arch/x86/mm/shadow/multi.c    |  2 +-
 xen/common/tmem_xen.c             |  2 +-
 xen/include/asm-x86/grant_table.h |  2 +-
 xen/include/asm-x86/paging.h      |  2 +-
 13 files changed, 30 insertions(+), 33 deletions(-)

diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
index 3030022..259b8c4 100644
--- a/xen/arch/x86/debug.c
+++ b/xen/arch/x86/debug.c
@@ -181,7 +181,7 @@ unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr,
         if ( toaddr )
         {
             copy_from_user(va, buf, pagecnt);    /* va = buf */
-            paging_mark_dirty(dp, mfn_x(mfn));
+            paging_mark_dirty(dp, mfn);
         }
         else
         {
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 61f5029..a589b17 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1923,7 +1923,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
          */
         if ( npfec.write_access )
         {
-            paging_mark_dirty(currd, mfn_x(mfn));
+            paging_mark_dirty(currd, mfn);
             /*
              * If p2m is really an altp2m, unlock here to avoid lock ordering
              * violation when the change below is propagated from host p2m.
@@ -2613,7 +2613,7 @@ static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
         if ( unlikely(p2m_is_discard_write(p2mt)) )
             *writable = 0;
         else if ( !permanent )
-            paging_mark_dirty(d, page_to_mfn(page));
+            paging_mark_dirty(d, _mfn(page_to_mfn(page)));
     }
 
     if ( !permanent )
@@ -2676,7 +2676,7 @@ void hvm_unmap_guest_frame(void *p, bool_t permanent)
         list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
             if ( track->page == page )
             {
-                paging_mark_dirty(d, mfn);
+                paging_mark_dirty(d, _mfn(mfn));
                 list_del(&track->list);
                 xfree(track);
                 break;
@@ -2693,7 +2693,7 @@ void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
 
     spin_lock(&d->arch.hvm_domain.write_map.lock);
     list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
-        paging_mark_dirty(d, page_to_mfn(track->page));
+        paging_mark_dirty(d, _mfn(page_to_mfn(track->page)));
     spin_unlock(&d->arch.hvm_domain.write_map.lock);
 }
 
@@ -3211,7 +3211,7 @@ static enum hvm_copy_result __hvm_copy(
                     memcpy(p, buf, count);
                 else
                     memset(p, 0, count);
-                paging_mark_dirty(curr->domain, page_to_mfn(page));
+                paging_mark_dirty(curr->domain, _mfn(page_to_mfn(page)));
             }
         }
         else
@@ -5799,7 +5799,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
             if ( page )
             {
-                paging_mark_dirty(d, page_to_mfn(page));
+                paging_mark_dirty(d, _mfn(page_to_mfn(page)));
                 /* These are most probably not page tables any more */
                 /* don't take a long time and don't die either */
                 sh_remove_shadows(d, _mfn(page_to_mfn(page)), 1, 0);
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 88071ab..e1123dc 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -282,7 +282,7 @@ static int hvm_add_ioreq_gmfn(
     rc = guest_physmap_add_page(d, _gfn(iorp->gmfn),
                                 _mfn(page_to_mfn(iorp->page)), 0);
     if ( rc == 0 )
-        paging_mark_dirty(d, page_to_mfn(iorp->page));
+        paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));
 
     return rc;
 }
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c5dd6f2..24a5211 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2251,7 +2251,7 @@ static int alloc_page_type(struct page_info *page, unsigned long type,
 
     /* A page table is dirtied when its type count becomes non-zero. */
     if ( likely(owner != NULL) )
-        paging_mark_dirty(owner, page_to_mfn(page));
+        paging_mark_dirty(owner, _mfn(page_to_mfn(page)));
 
     switch ( type & PGT_type_mask )
     {
@@ -2325,7 +2325,7 @@ int free_page_type(struct page_info *page, unsigned long type,
     if ( likely(owner != NULL) && unlikely(paging_mode_enabled(owner)) )
     {
         /* A page table is dirtied when its type count becomes zero. */
-        paging_mark_dirty(owner, page_to_mfn(page));
+        paging_mark_dirty(owner, _mfn(page_to_mfn(page)));
 
         if ( shadow_mode_refcounts(owner) )
             return 0;
@@ -3247,7 +3247,7 @@ long do_mmuext_op(
                 goto pin_drop;
 
             /* A page is dirtied when its pin status is set. */
-            paging_mark_dirty(pg_owner, page_to_mfn(page));
+            paging_mark_dirty(pg_owner, _mfn(page_to_mfn(page)));
 
             /* We can race domain destruction (domain_relinquish_resources). */
             if ( unlikely(pg_owner != d) )
@@ -3307,7 +3307,7 @@ long do_mmuext_op(
             put_page(page);
 
             /* A page is dirtied when its pin status is cleared. */
-            paging_mark_dirty(pg_owner, page_to_mfn(page));
+            paging_mark_dirty(pg_owner, _mfn(page_to_mfn(page)));
 
             break;
         }
@@ -3516,7 +3516,7 @@ long do_mmuext_op(
             }
 
             /* A page is dirtied when it's being cleared. */
-            paging_mark_dirty(pg_owner, page_to_mfn(page));
+            paging_mark_dirty(pg_owner, _mfn(page_to_mfn(page)));
 
             clear_domain_page(_mfn(page_to_mfn(page)));
 
@@ -3551,7 +3551,7 @@ long do_mmuext_op(
             }
 
             /* A page is dirtied when it's being copied to. */
-            paging_mark_dirty(pg_owner, page_to_mfn(dst_page));
+            paging_mark_dirty(pg_owner, _mfn(page_to_mfn(dst_page)));
 
             copy_domain_page(_mfn(page_to_mfn(dst_page)),
                              _mfn(page_to_mfn(src_page)));
@@ -3894,7 +3894,7 @@ long do_mmu_update(
 
             set_gpfn_from_mfn(mfn, gpfn);
 
-            paging_mark_dirty(pg_owner, mfn);
+            paging_mark_dirty(pg_owner, _mfn(mfn));
 
             put_page(mfn_to_page(mfn));
             break;
@@ -4700,7 +4700,7 @@ long do_update_descriptor(u64 pa, u64 desc)
         break;
     }
 
-    paging_mark_dirty(dom, mfn);
+    paging_mark_dirty(dom, _mfn(mfn));
 
     /* All is good so make the update. */
     gdt_pent = map_domain_page(_mfn(mfn));
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 868e909..250a2b3 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -396,21 +396,21 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
     {
 #if GUEST_PAGING_LEVELS == 4 /* 64-bit only... */
         if ( set_ad_bits(l4p + guest_l4_table_offset(va), &gw->l4e, 0) )
-            paging_mark_dirty(d, mfn_x(gw->l4mfn));
+            paging_mark_dirty(d, gw->l4mfn);
         if ( set_ad_bits(l3p + guest_l3_table_offset(va), &gw->l3e,
                          (pse1G && (pfec & PFEC_write_access))) )
-            paging_mark_dirty(d, mfn_x(gw->l3mfn));
+            paging_mark_dirty(d, gw->l3mfn);
 #endif
         if ( !pse1G ) 
         {
             if ( set_ad_bits(l2p + guest_l2_table_offset(va), &gw->l2e,
                              (pse2M && (pfec & PFEC_write_access))) )
-                paging_mark_dirty(d, mfn_x(gw->l2mfn));            
+                paging_mark_dirty(d, gw->l2mfn);
             if ( !pse2M ) 
             {
                 if ( set_ad_bits(l1p + guest_l1_table_offset(va), &gw->l1e, 
                                  (pfec & PFEC_write_access)) )
-                    paging_mark_dirty(d, mfn_x(gw->l1mfn));
+                    paging_mark_dirty(d, gw->l1mfn);
             }
         }
     }
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 25ff6a6..db7f389 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1210,7 +1210,7 @@ int __mem_sharing_unshare_page(struct domain *d,
 
     /* Now that the gfn<->mfn map is properly established,
      * marking dirty is feasible */
-    paging_mark_dirty(d, mfn_x(page_to_mfn(page)));
+    paging_mark_dirty(d, page_to_mfn(page));
     /* We do not need to unlock a private page */
     put_gfn(d, gfn);
     return 0;
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 149f529..367ee00 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1112,7 +1112,7 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
     for( i = 0; i < (1UL << order); i++ )
     {
         set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_aligned + i);
-        paging_mark_dirty(d, mfn_x(mfn) + i);
+        paging_mark_dirty(d, mfn_add(mfn, i));
     }
     
     p2m->pod.entry_count -= (1 << order);
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 4437611..3a66098 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -343,12 +343,9 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
 }
 
 /* Mark a page as dirty */
-void paging_mark_dirty(struct domain *d, unsigned long guest_mfn)
+void paging_mark_dirty(struct domain *d, mfn_t gmfn)
 {
     unsigned long pfn;
-    mfn_t gmfn;
-
-    gmfn = _mfn(guest_mfn);
 
     if ( !paging_mode_log_dirty(d) || !mfn_valid(gmfn) ||
          page_get_owner(mfn_to_page(gmfn)) != d )
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 0ba4153..126dfa8 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -999,7 +999,7 @@ sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size)
     int result = 0;
     struct page_info *page = mfn_to_page(gmfn);
 
-    paging_mark_dirty(v->domain, mfn_x(gmfn));
+    paging_mark_dirty(v->domain, gmfn);
 
     // Determine which types of shadows are affected, and update each.
     //
@@ -1818,11 +1818,11 @@ void sh_emulate_unmap_dest(struct vcpu *v, void *addr, unsigned int bytes,
             sh_validate_guest_pt_write(v, sh_ctxt->mfn[1], addr + b1, b2);
     }
 
-    paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn[0]));
+    paging_mark_dirty(v->domain, sh_ctxt->mfn[0]);
 
     if ( unlikely(mfn_valid(sh_ctxt->mfn[1])) )
     {
-        paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn[1]));
+        paging_mark_dirty(v->domain, sh_ctxt->mfn[1]);
         vunmap((void *)((unsigned long)addr & PAGE_MASK));
     }
     else
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 336d24f..805c056 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -675,7 +675,7 @@ _sh_propagate(struct vcpu *v,
     {
         if ( mfn_valid(target_mfn) ) {
             if ( ft & FETCH_TYPE_WRITE )
-                paging_mark_dirty(d, mfn_x(target_mfn));
+                paging_mark_dirty(d, target_mfn);
             else if ( !paging_mfn_is_dirty(d, target_mfn) )
                 sflags &= ~_PAGE_RW;
         }
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 84ae7fd..7d60b71 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -80,7 +80,7 @@ static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
     if ( mark_dirty )
     {
         put_page_and_type(cli_pfp);
-        paging_mark_dirty(current->domain,cli_mfn);
+        paging_mark_dirty(current->domain, _mfn(cli_mfn));
     }
     else
         put_page(cli_pfp);
diff --git a/xen/include/asm-x86/grant_table.h b/xen/include/asm-x86/grant_table.h
index 8c9bbcf..e1b3391 100644
--- a/xen/include/asm-x86/grant_table.h
+++ b/xen/include/asm-x86/grant_table.h
@@ -46,7 +46,7 @@ int replace_grant_host_mapping(
 #define gnttab_status_gmfn(d, t, i)                     \
     (mfn_to_gmfn(d, gnttab_status_mfn(t, i)))
 
-#define gnttab_mark_dirty(d, f) paging_mark_dirty((d), (f))
+#define gnttab_mark_dirty(d, f) paging_mark_dirty((d), _mfn(f))
 
 static inline void gnttab_clear_flag(unsigned int nr, uint16_t *st)
 {
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 2243aa1..63e3867 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -157,7 +157,7 @@ void paging_log_dirty_init(struct domain *d,
                            void (*clean_dirty_bitmap)(struct domain *d));
 
 /* mark a page as dirty */
-void paging_mark_dirty(struct domain *d, unsigned long guest_mfn);
+void paging_mark_dirty(struct domain *d, mfn_t gmfn);
 /* mark a page as dirty with taking guest pfn as parameter */
 void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn);
 
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 2/2] x86/paging: Rename paging_mark_pfn_dirty() and use pfn_t
  2016-12-14 14:26 [PATCH 1/2] x86/paging: Update paging_mark_dirty() to use mfn_t Andrew Cooper
@ 2016-12-14 14:26 ` Andrew Cooper
  2016-12-14 15:22   ` Jan Beulich
  2016-12-20  5:08   ` Tian, Kevin
  2016-12-14 15:13 ` [PATCH 1/2] x86/paging: Update paging_mark_dirty() to use mfn_t Jan Beulich
  2016-12-14 15:36 ` Tim Deegan
  2 siblings, 2 replies; 9+ messages in thread
From: Andrew Cooper @ 2016-12-14 14:26 UTC (permalink / raw)
  To: Xen-devel
  Cc: Kevin Tian, Jan Beulich, George Dunlap, Andrew Cooper,
	Tim Deegan, Jun Nakajima

paging_mark_gfn_dirty() actually takes a pfn, even by parameter name.  Rename
the function and alter the type to pfn_t to match.

Push pfn_t into the LOGDIRTY_IDX() macros, and clean up a couple of local
variable types in paging_mark_pfn_dirty().

Leave an explicit comment in vmx_vcpu_flush_pml_buffer() when we intentionally
perform a straight conversion from gfn to pfn.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: George Dunlap <george.dunlap@eu.citrix.com>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
---
 xen/arch/x86/hvm/vmx/vmcs.c  |  4 +++-
 xen/arch/x86/mm/paging.c     | 26 +++++++++++++-------------
 xen/include/asm-x86/paging.h | 10 +++++-----
 3 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 0995496..5db5fea 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1484,7 +1484,9 @@ void vmx_vcpu_flush_pml_buffer(struct vcpu *v)
          * is extremely difficult to debug.
          */
         p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
-        paging_mark_gfn_dirty(v->domain, gfn);
+
+        /* HVM guest: pfn == gfn */
+        paging_mark_pfn_dirty(v->domain, _pfn(gfn));
     }
 
     unmap_domain_page(pml_buf);
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 3a66098..0f3d885 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -265,25 +265,25 @@ static int paging_log_dirty_disable(struct domain *d, bool_t resuming)
 }
 
 /* Mark a page as dirty, with taking guest pfn as parameter */
-void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
+void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn)
 {
-    int changed;
+    bool changed;
     mfn_t mfn, *l4, *l3, *l2;
     unsigned long *l1;
-    int i1, i2, i3, i4;
+    unsigned int i1, i2, i3, i4;
 
     if ( !paging_mode_log_dirty(d) )
         return;
 
     /* Shared MFNs should NEVER be marked dirty */
-    BUG_ON(SHARED_M2P(pfn));
+    BUG_ON(SHARED_M2P(pfn_x(pfn)));
 
     /*
      * Values with the MSB set denote MFNs that aren't really part of the
      * domain's pseudo-physical memory map (e.g., the shared info frame).
      * Nothing to do here...
      */
-    if ( unlikely(!VALID_M2P(pfn)) )
+    if ( unlikely(!VALID_M2P(pfn_x(pfn))) )
         return;
 
     i1 = L1_LOGDIRTY_IDX(pfn);
@@ -331,8 +331,8 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
     if ( changed )
     {
         PAGING_DEBUG(LOGDIRTY,
-                     "marked mfn %" PRI_mfn " (pfn=%lx), dom %d\n",
-                     mfn_x(mfn), pfn, d->domain_id);
+                     "marked mfn %" PRI_mfn " (pfn %" PRI_pfn "), dom %d\n",
+                     mfn_x(mfn), pfn_x(pfn), d->domain_id);
         d->arch.paging.log_dirty.dirty_count++;
     }
 
@@ -345,23 +345,23 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
 /* Mark a page as dirty */
 void paging_mark_dirty(struct domain *d, mfn_t gmfn)
 {
-    unsigned long pfn;
+    pfn_t pfn;
 
     if ( !paging_mode_log_dirty(d) || !mfn_valid(gmfn) ||
          page_get_owner(mfn_to_page(gmfn)) != d )
         return;
 
     /* We /really/ mean PFN here, even for non-translated guests. */
-    pfn = get_gpfn_from_mfn(mfn_x(gmfn));
+    pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));
 
-    paging_mark_gfn_dirty(d, pfn);
+    paging_mark_pfn_dirty(d, pfn);
 }
 
 
 /* Is this guest page dirty? */
 int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
 {
-    unsigned long pfn;
+    pfn_t pfn;
     mfn_t mfn, *l4, *l3, *l2;
     unsigned long *l1;
     int rv;
@@ -370,9 +370,9 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
     ASSERT(paging_mode_log_dirty(d));
 
     /* We /really/ mean PFN here, even for non-translated guests. */
-    pfn = get_gpfn_from_mfn(mfn_x(gmfn));
+    pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));
     /* Shared pages are always read-only; invalid pages can't be dirty. */
-    if ( unlikely(SHARED_M2P(pfn) || !VALID_M2P(pfn)) )
+    if ( unlikely(SHARED_M2P(pfn_x(pfn)) || !VALID_M2P(pfn_x(pfn))) )
         return 0;
 
     mfn = d->arch.paging.log_dirty.top;
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 63e3867..cec6bfd 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -159,7 +159,7 @@ void paging_log_dirty_init(struct domain *d,
 /* mark a page as dirty */
 void paging_mark_dirty(struct domain *d, mfn_t gmfn);
 /* mark a page as dirty with taking guest pfn as parameter */
-void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn);
+void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn);
 
 /* is this guest page dirty? 
  * This is called from inside paging code, with the paging lock held. */
@@ -175,12 +175,12 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn);
  * TODO2: Abstract out the radix-tree mechanics?
  */
 #define LOGDIRTY_NODE_ENTRIES (1 << PAGETABLE_ORDER)
-#define L1_LOGDIRTY_IDX(pfn) ((pfn) & ((1 << (PAGE_SHIFT+3)) - 1))
-#define L2_LOGDIRTY_IDX(pfn) (((pfn) >> (PAGE_SHIFT+3)) & \
+#define L1_LOGDIRTY_IDX(pfn) (pfn_x(pfn) & ((1 << (PAGE_SHIFT + 3)) - 1))
+#define L2_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3)) & \
                               (LOGDIRTY_NODE_ENTRIES-1))
-#define L3_LOGDIRTY_IDX(pfn) (((pfn) >> (PAGE_SHIFT+3+PAGETABLE_ORDER)) & \
+#define L3_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER)) & \
                               (LOGDIRTY_NODE_ENTRIES-1))
-#define L4_LOGDIRTY_IDX(pfn) (((pfn) >> (PAGE_SHIFT+3+PAGETABLE_ORDER*2)) & \
+#define L4_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER * 2)) & \
                               (LOGDIRTY_NODE_ENTRIES-1))
 
 /* VRAM dirty tracking support */
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/2] x86/paging: Update paging_mark_dirty() to use mfn_t
  2016-12-14 14:26 [PATCH 1/2] x86/paging: Update paging_mark_dirty() to use mfn_t Andrew Cooper
  2016-12-14 14:26 ` [PATCH 2/2] x86/paging: Rename paging_mark_pfn_dirty() and use pfn_t Andrew Cooper
@ 2016-12-14 15:13 ` Jan Beulich
  2016-12-14 15:28   ` Andrew Cooper
  2016-12-14 15:36 ` Tim Deegan
  2 siblings, 1 reply; 9+ messages in thread
From: Jan Beulich @ 2016-12-14 15:13 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: Stefano Stabellini, George Dunlap, Tim Deegan, Xen-devel, JulienGrall

>>> On 14.12.16 at 15:26, <andrew.cooper3@citrix.com> wrote:
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> ---
> CC: Jan Beulich <JBeulich@suse.com>
> CC: Tim Deegan <tim@xen.org>
> CC: George Dunlap <george.dunlap@eu.citrix.com>
> CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> CC: Stefano Stabellini <sstabellini@kernel.org>
> CC: Julien Grall <julien.grall@arm.com>
> 
> The one use of paging_mark_dirty() in common/tmem shows that TMEM currently
> wont compile for ARM.

I don't understand: It builds prior to this change, so why would it
stop building afterwards? Without this remark I'd have given my
R-b ...

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/2] x86/paging: Rename paging_mark_pfn_dirty() and use pfn_t
  2016-12-14 14:26 ` [PATCH 2/2] x86/paging: Rename paging_mark_pfn_dirty() and use pfn_t Andrew Cooper
@ 2016-12-14 15:22   ` Jan Beulich
  2016-12-14 15:30     ` Andrew Cooper
  2016-12-20  5:08   ` Tian, Kevin
  1 sibling, 1 reply; 9+ messages in thread
From: Jan Beulich @ 2016-12-14 15:22 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: George Dunlap, KevinTian, Tim Deegan, Jun Nakajima, Xen-devel

>>> On 14.12.16 at 15:26, <andrew.cooper3@citrix.com> wrote:
> paging_mark_gfn_dirty() actually takes a pfn, even by paramter name.  Rename
> the function and alter the type to pfn_t to match.
> 
> Push pfn_t into the LOGDIRTY_IDX() macros, and clean up a couple of local
> variable types in paging_mark_pfn_dirty().
> 
> Leave an explicit comment in vmx_vcpu_flush_pml_buffer() when we intentally
> perform a straight conversion from gfn to pfn.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>
with two remarks:

> @@ -331,8 +331,8 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
>      if ( changed )
>      {
>          PAGING_DEBUG(LOGDIRTY,
> -                     "marked mfn %" PRI_mfn " (pfn=%lx), dom %d\n",
> -                     mfn_x(mfn), pfn, d->domain_id);
> +                     "marked mfn %" PRI_mfn " (pfn %" PRI_pfn "), dom %d\n",
> +                     mfn_x(mfn), pfn_x(pfn), d->domain_id);

Mind making the domain part canonical (i.e. d%d), and perhaps
even moving it to the front ("d%d: ...\n")?

> @@ -345,23 +345,23 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
>  /* Mark a page as dirty */
>  void paging_mark_dirty(struct domain *d, mfn_t gmfn)
>  {
> -    unsigned long pfn;
> +    pfn_t pfn;
>  
>      if ( !paging_mode_log_dirty(d) || !mfn_valid(gmfn) ||
>           page_get_owner(mfn_to_page(gmfn)) != d )
>          return;
>  
>      /* We /really/ mean PFN here, even for non-translated guests. */
> -    pfn = get_gpfn_from_mfn(mfn_x(gmfn));
> +    pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));
>  
> -    paging_mark_gfn_dirty(d, pfn);
> +    paging_mark_pfn_dirty(d, pfn);
>  }

Looking at all of this, could patch 1 perhaps rename gmfn to mfn
in this function?

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/2] x86/paging: Update paging_mark_dirty() to use mfn_t
  2016-12-14 15:13 ` [PATCH 1/2] x86/paging: Update paging_mark_dirty() to use mfn_t Jan Beulich
@ 2016-12-14 15:28   ` Andrew Cooper
  2016-12-14 15:48     ` Konrad Rzeszutek Wilk
  0 siblings, 1 reply; 9+ messages in thread
From: Andrew Cooper @ 2016-12-14 15:28 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, George Dunlap, Tim Deegan, Xen-devel, JulienGrall

On 14/12/16 15:13, Jan Beulich wrote:
>>>> On 14.12.16 at 15:26, <andrew.cooper3@citrix.com> wrote:
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
>> ---
>> CC: Jan Beulich <JBeulich@suse.com>
>> CC: Tim Deegan <tim@xen.org>
>> CC: George Dunlap <george.dunlap@eu.citrix.com>
>> CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
>> CC: Stefano Stabellini <sstabellini@kernel.org>
>> CC: Julien Grall <julien.grall@arm.com>
>>
>> The one use of paging_mark_dirty() in common/tmem shows that TMEM currently
>> wont compile for ARM.
> I don't understand: It builds prior to this change, so why would it
> stop building afterwards? Without this remark I'd have given my
> R-b ...

Oh.  cli_{get,put}_page() are stubbed to ASSERT(0) on ARM, which
restricts the paging_mark_dirty() call to !ARM.

The history is weird here.  The last change here was you taking out the
__ia64__ code.

I can't work out why they should be stubbed separately; there is nothing
overly x86-specific in them.

Konrad: Any ideas?

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/2] x86/paging: Rename paging_mark_pfn_dirty() and use pfn_t
  2016-12-14 15:22   ` Jan Beulich
@ 2016-12-14 15:30     ` Andrew Cooper
  0 siblings, 0 replies; 9+ messages in thread
From: Andrew Cooper @ 2016-12-14 15:30 UTC (permalink / raw)
  To: Jan Beulich; +Cc: George Dunlap, KevinTian, Tim Deegan, Jun Nakajima, Xen-devel

On 14/12/16 15:22, Jan Beulich wrote:
>>>> On 14.12.16 at 15:26, <andrew.cooper3@citrix.com> wrote:
>> paging_mark_gfn_dirty() actually takes a pfn, even by paramter name.  Rename
>> the function and alter the type to pfn_t to match.
>>
>> Push pfn_t into the LOGDIRTY_IDX() macros, and clean up a couple of local
>> variable types in paging_mark_pfn_dirty().
>>
>> Leave an explicit comment in vmx_vcpu_flush_pml_buffer() when we intentally
>> perform a straight conversion from gfn to pfn.
>>
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
> with two remarks:
>
>> @@ -331,8 +331,8 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
>>      if ( changed )
>>      {
>>          PAGING_DEBUG(LOGDIRTY,
>> -                     "marked mfn %" PRI_mfn " (pfn=%lx), dom %d\n",
>> -                     mfn_x(mfn), pfn, d->domain_id);
>> +                     "marked mfn %" PRI_mfn " (pfn %" PRI_pfn "), dom %d\n",
>> +                     mfn_x(mfn), pfn_x(pfn), d->domain_id);
> Mind making the domain part canonical (i.e. d%d), and perhaps
> even moving it to the front ("d%d: ...\n")?

Ok.

>
>> @@ -345,23 +345,23 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
>>  /* Mark a page as dirty */
>>  void paging_mark_dirty(struct domain *d, mfn_t gmfn)
>>  {
>> -    unsigned long pfn;
>> +    pfn_t pfn;
>>  
>>      if ( !paging_mode_log_dirty(d) || !mfn_valid(gmfn) ||
>>           page_get_owner(mfn_to_page(gmfn)) != d )
>>          return;
>>  
>>      /* We /really/ mean PFN here, even for non-translated guests. */
>> -    pfn = get_gpfn_from_mfn(mfn_x(gmfn));
>> +    pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));
>>  
>> -    paging_mark_gfn_dirty(d, pfn);
>> +    paging_mark_pfn_dirty(d, pfn);
>>  }
> Looking at all of this, could patch 1 perhaps rename gmfn to mfn
> in this function?

This actually follows the shadow naming convention, of an mfn belonging
to a guest, and as checked in the first if clause.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/2] x86/paging: Update paging_mark_dirty() to use mfn_t
  2016-12-14 14:26 [PATCH 1/2] x86/paging: Update paging_mark_dirty() to use mfn_t Andrew Cooper
  2016-12-14 14:26 ` [PATCH 2/2] x86/paging: Rename paging_mark_pfn_dirty() and use pfn_t Andrew Cooper
  2016-12-14 15:13 ` [PATCH 1/2] x86/paging: Update paging_mark_dirty() to use mfn_t Jan Beulich
@ 2016-12-14 15:36 ` Tim Deegan
  2 siblings, 0 replies; 9+ messages in thread
From: Tim Deegan @ 2016-12-14 15:36 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: George Dunlap, Julien Grall, Stefano Stabellini, Jan Beulich, Xen-devel



At 14:26 +0000 on 14 Dec (1481725588), Andrew Cooper wrote:
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Both patches Acked-by: Tim Deegan <tim@xen.org>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/2] x86/paging: Update paging_mark_dirty() to use mfn_t
  2016-12-14 15:28   ` Andrew Cooper
@ 2016-12-14 15:48     ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 9+ messages in thread
From: Konrad Rzeszutek Wilk @ 2016-12-14 15:48 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: Stefano Stabellini, George Dunlap, Tim Deegan, Xen-devel,
	JulienGrall, Jan Beulich

On Wed, Dec 14, 2016 at 03:28:49PM +0000, Andrew Cooper wrote:
> On 14/12/16 15:13, Jan Beulich wrote:
> >>>> On 14.12.16 at 15:26, <andrew.cooper3@citrix.com> wrote:
> >> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> >> ---
> >> CC: Jan Beulich <JBeulich@suse.com>
> >> CC: Tim Deegan <tim@xen.org>
> >> CC: George Dunlap <george.dunlap@eu.citrix.com>
> >> CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> >> CC: Stefano Stabellini <sstabellini@kernel.org>
> >> CC: Julien Grall <julien.grall@arm.com>
> >>
> >> The one use of paging_mark_dirty() in common/tmem shows that TMEM currently
> >> wont compile for ARM.
> > I don't understand: It builds prior to this change, so why would it
> > stop building afterwards? Without this remark I'd have given my
> > R-b ...
> 
> Oh.  cli_{get,put}_page() are stubbed to ASSERT(0) on ARM, which
> restricts the paging_mark_dirty() call to !ARM.
> 
> The history is weird here.  The last change here was you taking out the
> __ia64__ code.
> 
> I can't work out why they should be stubbed separately; there is nothing
> overly x86-specific in them.
> 
> Konrad: Any ideas?

My recollection was that nobody had tried tmem under ARM and it made
it just easier to bypass it until the security audit is completed.

But ARM is the perfect candidate for tmem.. I did have enabling tmem
on ARM on my TODO list but it is below migration. Hm.

I am with changes that look "OKish" to the tmem and then fixing
them when I get my hands on enabling tmem on ARM.

> 
> ~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/2] x86/paging: Rename paging_mark_pfn_dirty() and use pfn_t
  2016-12-14 14:26 ` [PATCH 2/2] x86/paging: Rename paging_mark_pfn_dirty() and use pfn_t Andrew Cooper
  2016-12-14 15:22   ` Jan Beulich
@ 2016-12-20  5:08   ` Tian, Kevin
  1 sibling, 0 replies; 9+ messages in thread
From: Tian, Kevin @ 2016-12-20  5:08 UTC (permalink / raw)
  To: Andrew Cooper, Xen-devel
  Cc: George Dunlap, Tim Deegan, Nakajima, Jun, Jan Beulich

> From: Andrew Cooper [mailto:andrew.cooper3@citrix.com]
> Sent: Wednesday, December 14, 2016 10:26 PM
> 
> paging_mark_gfn_dirty() actually takes a pfn, even by paramter name.  Rename
> the function and alter the type to pfn_t to match.
> 
> Push pfn_t into the LOGDIRTY_IDX() macros, and clean up a couple of local
> variable types in paging_mark_pfn_dirty().
> 
> Leave an explicit comment in vmx_vcpu_flush_pml_buffer() when we intentally
> perform a straight conversion from gfn to pfn.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2016-12-20  5:08 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-12-14 14:26 [PATCH 1/2] x86/paging: Update paging_mark_dirty() to use mfn_t Andrew Cooper
2016-12-14 14:26 ` [PATCH 2/2] x86/paging: Rename paging_mark_pfn_dirty() and use pfn_t Andrew Cooper
2016-12-14 15:22   ` Jan Beulich
2016-12-14 15:30     ` Andrew Cooper
2016-12-20  5:08   ` Tian, Kevin
2016-12-14 15:13 ` [PATCH 1/2] x86/paging: Update paging_mark_dirty() to use mfn_t Jan Beulich
2016-12-14 15:28   ` Andrew Cooper
2016-12-14 15:48     ` Konrad Rzeszutek Wilk
2016-12-14 15:36 ` Tim Deegan

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.