* [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t
@ 2015-07-02 12:04 Ben Catterall
  2015-07-02 12:04 ` [PATCH v2 2/3] xen/domain_page: Convert copy/clear_domain_page() " Ben Catterall
                   ` (3 more replies)
  0 siblings, 4 replies; 15+ messages in thread
From: Ben Catterall @ 2015-07-02 12:04 UTC
  To: xen-devel
  Cc: keir, ian.campbell, andrew.cooper3, tim, stefano.stabellini, Jan Beulich

From: Andrew Cooper <andrew.cooper3@citrix.com>

The sh_map/unmap wrappers can be dropped, and the opportunity is taken to
turn some #defines into static inlines, for added type safety.

As part of adding the type safety, GCC highlights a problematic include
cycle: arm/mm.h includes domain_page.h, which includes xen/mm.h, and the
build falls over __page_to_mfn being used before it is declared.  Simply
dropping the inclusion of domain_page.h fixes the compilation issue.
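
For background, mfn_t is the hypervisor's "boxed" MFN type: in debug builds
it wraps the raw unsigned long in a single-member struct, so the compiler
rejects code that mixes raw integers and MFNs, while release builds reduce
it to a plain typedef at no cost.  A minimal sketch of the debug-build
flavour (the real definition comes from the TYPE_SAFE() macro in
xen/include/xen/mm.h; this is an illustration, not a quote of it):

    typedef struct { unsigned long mfn; } mfn_t;

    /* Box a raw MFN into the safe type. */
    static inline mfn_t _mfn(unsigned long m) { mfn_t t = { m }; return t; }

    /* Unbox, for the places that genuinely need the raw value. */
    static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }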

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
CC: Jan Beulich <JBeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: Ian Campbell <ian.campbell@citrix.com>
CC: Stefano Stabellini <stefano.stabellini@citrix.com>
---
 xen/arch/arm/mm.c                |  6 ++----
 xen/arch/x86/domain_page.c       |  9 ++++-----
 xen/arch/x86/mm/shadow/multi.c   | 10 +++++-----
 xen/arch/x86/mm/shadow/private.h | 12 ------------
 xen/include/asm-arm/mm.h         |  1 -
 xen/include/xen/domain_page.h    | 22 +++++++++++++++++-----
 6 files changed, 28 insertions(+), 32 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index ff1b330..d479048 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -271,11 +271,9 @@ void clear_fixmap(unsigned map)
 }
 
 #ifdef CONFIG_DOMAIN_PAGE
-void *map_domain_page_global(unsigned long mfn)
+void *map_domain_page_global(mfn_t mfn)
 {
-    mfn_t m = _mfn(mfn);
-
-    return vmap(&m, 1);
+    return vmap(&mfn, 1);
 }
 
 void unmap_domain_page_global(const void *va)
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index d684b2f..0f7548b 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -302,17 +302,16 @@ int mapcache_vcpu_init(struct vcpu *v)
     return 0;
 }
 
-void *map_domain_page_global(unsigned long mfn)
+void *map_domain_page_global(mfn_t mfn)
 {
-    mfn_t m = _mfn(mfn);
     ASSERT(!in_irq() && local_irq_is_enabled());
 
 #ifdef NDEBUG
-    if ( mfn <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
-        return mfn_to_virt(mfn);
+    if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
+        return mfn_to_virt(mfn_x(mfn));
 #endif
 
-    return vmap(&m, 1);
+    return vmap(&mfn, 1);
 }
 
 void unmap_domain_page_global(const void *ptr)
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 42204d9..54d0bd3 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3806,7 +3806,7 @@ sh_detach_old_tables(struct vcpu *v)
     if ( v->arch.paging.shadow.guest_vtable )
     {
         if ( shadow_mode_external(d) || shadow_mode_translate(d) )
-            sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
+            unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
         v->arch.paging.shadow.guest_vtable = NULL;
     }
 #endif // !NDEBUG
@@ -3977,8 +3977,8 @@ sh_update_cr3(struct vcpu *v, int do_locking)
     if ( shadow_mode_external(d) || shadow_mode_translate(d) )
     {
         if ( v->arch.paging.shadow.guest_vtable )
-            sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
-        v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+            unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
+        v->arch.paging.shadow.guest_vtable = map_domain_page_global(gmfn);
         /* PAGING_LEVELS==4 implies 64-bit, which means that
          * map_domain_page_global can't fail */
         BUG_ON(v->arch.paging.shadow.guest_vtable == NULL);
@@ -4010,8 +4010,8 @@ sh_update_cr3(struct vcpu *v, int do_locking)
     if ( shadow_mode_external(d) || shadow_mode_translate(d) )
     {
         if ( v->arch.paging.shadow.guest_vtable )
-            sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
-        v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+            unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
+        v->arch.paging.shadow.guest_vtable = map_domain_page_global(gmfn);
         /* Does this really need map_domain_page_global?  Handle the
          * error properly if so. */
         BUG_ON(v->arch.paging.shadow.guest_vtable == NULL); /* XXX */
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index f72ea9f..eff39dc 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -517,18 +517,6 @@ sh_unmap_domain_page(void *p)
     unmap_domain_page(p);
 }
 
-static inline void *
-sh_map_domain_page_global(mfn_t mfn)
-{
-    return map_domain_page_global(mfn_x(mfn));
-}
-
-static inline void
-sh_unmap_domain_page_global(void *p)
-{
-    unmap_domain_page_global(p);
-}
-
 /**************************************************************************/
 /* Shadow-page refcounting. */
 
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 3601140..2e1f21a 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -5,7 +5,6 @@
 #include <xen/kernel.h>
 #include <asm/page.h>
 #include <public/xen.h>
-#include <xen/domain_page.h>
 #include <xen/pdx.h>
 
 /* Align Xen to a 2 MiB boundary. */
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index b7a710b..1aac0eb 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -41,11 +41,15 @@ unsigned long domain_page_map_to_mfn(const void *va);
  * address spaces (not just within the VCPU that created the mapping). Global
  * mappings can also be unmapped from any context.
  */
-void *map_domain_page_global(unsigned long mfn);
+void *map_domain_page_global(mfn_t mfn);
 void unmap_domain_page_global(const void *va);
 
 #define __map_domain_page(pg)        map_domain_page(__page_to_mfn(pg))
-#define __map_domain_page_global(pg) map_domain_page_global(__page_to_mfn(pg))
+
+static inline void *__map_domain_page_global(struct page_info *pg)
+{
+    return map_domain_page_global(_mfn(__page_to_mfn(pg)));
+}
 
 #define DMCACHE_ENTRY_VALID 1U
 #define DMCACHE_ENTRY_HELD  2U
@@ -117,9 +121,17 @@ domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
                                                       mfn_to_virt(smfn))
 #define domain_page_map_to_mfn(va)          virt_to_mfn((unsigned long)(va))
 
-#define map_domain_page_global(mfn)         mfn_to_virt(mfn)
-#define __map_domain_page_global(pg)        page_to_virt(pg)
-#define unmap_domain_page_global(va)        ((void)(va))
+static inline void *map_domain_page_global(mfn_t mfn)
+{
+    return mfn_to_virt(mfn_x(mfn));
+}
+
+static inline void *__map_domain_page_global(struct page_info *pg)
+{
+    return page_to_virt(pg);
+}
+
+static inline void unmap_domain_page_global(const void *va) {}
 
 struct domain_mmap_cache { 
 };
-- 
2.1.4


* [PATCH v2 2/3] xen/domain_page: Convert copy/clear_domain_page() to using mfn_t
  2015-07-02 12:04 [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t Ben Catterall
@ 2015-07-02 12:04 ` Ben Catterall
  2015-07-07 10:03   ` Jan Beulich
  2015-07-02 12:04 ` [PATCH v2 3/3] Convert map_domain_page() to use the new mfn_t type Ben Catterall
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 15+ messages in thread
From: Ben Catterall @ 2015-07-02 12:04 UTC
  To: xen-devel
  Cc: keir, ian.campbell, andrew.cooper3, tim, stefano.stabellini,
	jbeulich, Ben Catterall

From: Andrew Cooper <andrew.cooper3@citrix.com>

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
[Convert grant_table.c to pass mfn_t types and fix the ARM build]

Signed-off-by: Ben Catterall <Ben.Catterall@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/mm.c             |  7 ++++---
 xen/common/grant_table.c      |  2 +-
 xen/common/kimage.c           | 12 ++++++------
 xen/common/memory.c           | 12 +++++-------
 xen/include/xen/domain_page.h | 15 ++++++---------
 5 files changed, 22 insertions(+), 26 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 9e08c9b..8a7524f 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3294,7 +3294,7 @@ long do_mmuext_op(
             /* A page is dirtied when it's being cleared. */
             paging_mark_dirty(pg_owner, page_to_mfn(page));
 
-            clear_domain_page(page_to_mfn(page));
+            clear_domain_page(_mfn(page_to_mfn(page)));
 
             put_page_and_type(page);
             break;
@@ -3328,7 +3328,8 @@ long do_mmuext_op(
             /* A page is dirtied when it's being copied to. */
             paging_mark_dirty(pg_owner, page_to_mfn(dst_page));
 
-            copy_domain_page(page_to_mfn(dst_page), page_to_mfn(src_page));
+            copy_domain_page(_mfn(page_to_mfn(dst_page)),
+                             _mfn(page_to_mfn(src_page)));
 
             put_page_and_type(dst_page);
             put_page(src_page);
@@ -6005,7 +6006,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
             pg = alloc_domheap_page(d, MEMF_no_owner);
             if ( pg )
             {
-                clear_domain_page(page_to_mfn(pg));
+                clear_domain_page(_mfn(page_to_mfn(pg)));
                 if ( !IS_NIL(ppg) )
                     *ppg++ = pg;
                 l1tab[l1_table_offset(va)] =
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index a011276..9786ecd 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1732,7 +1732,7 @@ gnttab_transfer(
                 goto unlock_and_copyback;
             }
 
-            copy_domain_page(page_to_mfn(new_page), mfn);
+            copy_domain_page(_mfn(page_to_mfn(new_page)), _mfn(mfn));
 
             page->count_info &= ~(PGC_count_mask|PGC_allocated);
             free_domheap_page(page);
diff --git a/xen/common/kimage.c b/xen/common/kimage.c
index 8c4854d..742e4e8 100644
--- a/xen/common/kimage.c
+++ b/xen/common/kimage.c
@@ -77,7 +77,7 @@ static struct page_info *kimage_alloc_zeroed_page(unsigned memflags)
     if ( !page )
         return NULL;
 
-    clear_domain_page(page_to_mfn(page));
+    clear_domain_page(_mfn(page_to_mfn(page)));
 
     return page;
 }
@@ -409,7 +409,7 @@ static struct page_info *kimage_alloc_crash_control_page(struct kexec_image *ima
     if ( page )
     {
         image->next_crash_page = hole_end;
-        clear_domain_page(page_to_mfn(page));
+        clear_domain_page(_mfn(page_to_mfn(page)));
     }
 
     return page;
@@ -637,15 +637,15 @@ static struct page_info *kimage_alloc_page(struct kexec_image *image,
         if ( old )
         {
             /* If so move it. */
-            unsigned long old_mfn = *old >> PAGE_SHIFT;
-            unsigned long mfn = addr >> PAGE_SHIFT;
+            mfn_t old_mfn = _mfn(*old >> PAGE_SHIFT);
+            mfn_t mfn = _mfn(addr >> PAGE_SHIFT);
 
             copy_domain_page(mfn, old_mfn);
             clear_domain_page(old_mfn);
             *old = (addr & ~PAGE_MASK) | IND_SOURCE;
             unmap_domain_page(old);
 
-            page = mfn_to_page(old_mfn);
+            page = mfn_to_page(mfn_x(old_mfn));
             break;
         }
         else
@@ -917,7 +917,7 @@ int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
                 goto done;
             }
 
-            copy_domain_page(page_to_mfn(xen_page), mfn);
+            copy_domain_page(_mfn(page_to_mfn(xen_page)), _mfn(mfn));
             put_page(guest_page);
 
             ret = kimage_add_page(image, page_to_maddr(xen_page));
diff --git a/xen/common/memory.c b/xen/common/memory.c
index c84fcdd..ae4c32e 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1170,25 +1170,23 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     return rc;
 }
 
-#ifdef CONFIG_DOMAIN_PAGE
-void clear_domain_page(unsigned long mfn)
+void clear_domain_page(mfn_t mfn)
 {
-    void *ptr = map_domain_page(mfn);
+    void *ptr = map_domain_page(mfn_x(mfn));
 
     clear_page(ptr);
     unmap_domain_page(ptr);
 }
 
-void copy_domain_page(unsigned long dmfn, unsigned long smfn)
+void copy_domain_page(mfn_t dest, mfn_t source)
 {
-    const void *src = map_domain_page(smfn);
-    void *dst = map_domain_page(dmfn);
+    const void *src = map_domain_page(mfn_x(source));
+    void *dst = map_domain_page(mfn_x(dest));
 
     copy_page(dst, src);
     unmap_domain_page(dst);
     unmap_domain_page(src);
 }
-#endif
 
 void destroy_ring_for_helper(
     void **_va, struct page_info *page)
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index 1aac0eb..41f365c 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -11,6 +11,12 @@
 
 #include <xen/mm.h>
 
+/*
+ * Clear a given page frame, or copy between two of them.
+ */
+void clear_domain_page(mfn_t mfn);
+void copy_domain_page(mfn_t dest, mfn_t source);
+
 #ifdef CONFIG_DOMAIN_PAGE
 
 /*
@@ -25,12 +31,6 @@ void *map_domain_page(unsigned long mfn);
  */
 void unmap_domain_page(const void *va);
 
-/*
- * Clear a given page frame, or copy between two of them.
- */
-void clear_domain_page(unsigned long mfn);
-void copy_domain_page(unsigned long dmfn, unsigned long smfn);
-
 /* 
  * Given a VA from map_domain_page(), return its underlying MFN.
  */
@@ -116,9 +116,6 @@ domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
 #define map_domain_page(mfn)                mfn_to_virt(mfn)
 #define __map_domain_page(pg)               page_to_virt(pg)
 #define unmap_domain_page(va)               ((void)(va))
-#define clear_domain_page(mfn)              clear_page(mfn_to_virt(mfn))
-#define copy_domain_page(dmfn, smfn)        copy_page(mfn_to_virt(dmfn), \
-                                                      mfn_to_virt(smfn))
 #define domain_page_map_to_mfn(va)          virt_to_mfn((unsigned long)(va))
 
 static inline void *map_domain_page_global(mfn_t mfn)
-- 
2.1.4


* [PATCH v2 3/3] Convert map_domain_page() to use the new mfn_t type
  2015-07-02 12:04 [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t Ben Catterall
  2015-07-02 12:04 ` [PATCH v2 2/3] xen/domain_page: Convert copy/clear_domain_page() " Ben Catterall
@ 2015-07-02 12:04 ` Ben Catterall
  2015-07-07 10:10   ` Jan Beulich
  2015-07-07 10:01 ` [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t Jan Beulich
  2015-07-07 15:12 ` Ian Campbell
  3 siblings, 1 reply; 15+ messages in thread
From: Ben Catterall @ 2015-07-02 12:04 UTC
  To: xen-devel
  Cc: keir, ian.campbell, andrew.cooper3, tim, stefano.stabellini,
	jbeulich, Ben Catterall

Reworked the internals and declaration of map_domain_page() to use the new
mfn_t type, applying (un)boxing where needed.  Converted callers to pass
mfn_t, boxing raw MFNs at the call sites.
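
The conversion follows one mechanical pattern at each call site.  A
hypothetical example (pg, va and raw are illustrative names, not code taken
from this patch):

    mfn_t mfn = _mfn(page_to_mfn(pg)); /* box the raw MFN at the boundary */
    void *va = map_domain_page(mfn);   /* the API now takes mfn_t directly */
    ...
    unsigned long raw = mfn_x(mfn);    /* unbox only where a raw value is needed */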

Signed-off-by: Ben Catterall <Ben.Catterall@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

---
Changed since v1:
   * Created paddr_to_mfn() and mfn_to_paddr() for both x86 and ARM
   * Converted code to use the new paddr_to_mfn() rather than e.g.
     paddr>>PAGE_SHIFT
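
   A sketch of what these helpers plausibly look like (the real hunks touch
   asm-x86/page.h and asm-arm/mm.h and are not reproduced in this excerpt,
   so the exact spelling may differ):

       /* Translate between a physical address and its machine frame number. */
       #define paddr_to_mfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
       #define mfn_to_paddr(mfn) ((paddr_t)(mfn) << PAGE_SHIFT)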

---
 xen/arch/arm/domain_build.c               |  2 +-
 xen/arch/arm/kernel.c                     |  2 +-
 xen/arch/arm/mm.c                         | 12 +++++-----
 xen/arch/arm/p2m.c                        |  4 ++--
 xen/arch/arm/traps.c                      |  4 ++--
 xen/arch/x86/debug.c                      | 10 ++++----
 xen/arch/x86/domain.c                     |  4 ++--
 xen/arch/x86/domain_build.c               | 10 ++++----
 xen/arch/x86/domain_page.c                | 22 ++++++++---------
 xen/arch/x86/domctl.c                     |  2 +-
 xen/arch/x86/mm.c                         | 40 +++++++++++++++----------------
 xen/arch/x86/mm/guest_walk.c              |  2 +-
 xen/arch/x86/mm/hap/guest_walk.c          |  2 +-
 xen/arch/x86/mm/mem_sharing.c             |  4 ++--
 xen/arch/x86/mm/p2m-ept.c                 | 22 ++++++++---------
 xen/arch/x86/mm/p2m-pod.c                 |  8 +++----
 xen/arch/x86/mm/p2m-pt.c                  | 28 +++++++++++-----------
 xen/arch/x86/mm/p2m.c                     |  2 +-
 xen/arch/x86/mm/paging.c                  | 32 ++++++++++++-------------
 xen/arch/x86/mm/shadow/common.c           |  2 +-
 xen/arch/x86/mm/shadow/multi.c            |  4 ++--
 xen/arch/x86/mm/shadow/private.h          |  2 +-
 xen/arch/x86/smpboot.c                    |  2 +-
 xen/arch/x86/tboot.c                      |  4 ++--
 xen/arch/x86/traps.c                      | 12 +++++-----
 xen/arch/x86/x86_64/mm.c                  | 14 +++++------
 xen/arch/x86/x86_64/traps.c               | 10 ++++----
 xen/arch/x86/x86_emulate.c                | 10 ++++----
 xen/common/grant_table.c                  |  4 ++--
 xen/common/kexec.c                        |  4 ++--
 xen/common/kimage.c                       | 10 ++++----
 xen/common/memory.c                       |  6 ++---
 xen/common/tmem_xen.c                     |  6 ++---
 xen/drivers/passthrough/amd/iommu_guest.c | 10 ++++----
 xen/drivers/passthrough/amd/iommu_map.c   | 14 +++++------
 xen/drivers/passthrough/vtd/x86/vtd.c     |  2 +-
 xen/include/asm-arm/mm.h                  |  2 ++
 xen/include/asm-x86/hap.h                 |  2 +-
 xen/include/asm-x86/page.h                | 10 +++++---
 xen/include/asm-x86/paging.h              |  2 +-
 xen/include/xen/domain_page.h             |  8 +++----
 41 files changed, 179 insertions(+), 173 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index e9cb8a9..37db8b7 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -1322,7 +1322,7 @@ static void initrd_load(struct kernel_info *kinfo)
             return;
         }
 
-        dst = map_domain_page(ma>>PAGE_SHIFT);
+        dst = map_domain_page(_mfn(paddr_to_mfn(ma)));
 
         copy_from_paddr(dst + s, paddr + offs, l);
 
diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c
index 209c3dd..9826fb2 100644
--- a/xen/arch/arm/kernel.c
+++ b/xen/arch/arm/kernel.c
@@ -182,7 +182,7 @@ static void kernel_zimage_load(struct kernel_info *info)
             return;
         }
 
-        dst = map_domain_page(ma>>PAGE_SHIFT);
+        dst = map_domain_page(_mfn(paddr_to_mfn(ma)));
 
         copy_from_paddr(dst + s, paddr + offs, l);
 
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index d479048..ae0f34c 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -213,7 +213,7 @@ void dump_pt_walk(paddr_t ttbr, paddr_t addr,
     else
         root_table = 0;
 
-    mapping = map_domain_page(root_pfn + root_table);
+    mapping = map_domain_page(_mfn(root_pfn + root_table));
 
     for ( level = root_level; ; level++ )
     {
@@ -230,7 +230,7 @@ void dump_pt_walk(paddr_t ttbr, paddr_t addr,
 
         /* For next iteration */
         unmap_domain_page(mapping);
-        mapping = map_domain_page(pte.walk.base);
+        mapping = map_domain_page(_mfn(pte.walk.base));
     }
 
     unmap_domain_page(mapping);
@@ -282,11 +282,11 @@ void unmap_domain_page_global(const void *va)
 }
 
 /* Map a page of domheap memory */
-void *map_domain_page(unsigned long mfn)
+void *map_domain_page(mfn_t mfn)
 {
     unsigned long flags;
     lpae_t *map = this_cpu(xen_dommap);
-    unsigned long slot_mfn = mfn & ~LPAE_ENTRY_MASK;
+    unsigned long slot_mfn = mfn_x(mfn) & ~LPAE_ENTRY_MASK;
     vaddr_t va;
     lpae_t pte;
     int i, slot;
@@ -339,7 +339,7 @@ void *map_domain_page(unsigned long mfn)
 
     va = (DOMHEAP_VIRT_START
           + (slot << SECOND_SHIFT)
-          + ((mfn & LPAE_ENTRY_MASK) << THIRD_SHIFT));
+          + ((mfn_x(mfn) & LPAE_ENTRY_MASK) << THIRD_SHIFT));
 
     /*
      * We may not have flushed this specific subpage at map time,
@@ -386,7 +386,7 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
 
 void flush_page_to_ram(unsigned long mfn)
 {
-    void *v = map_domain_page(mfn);
+    void *v = map_domain_page(_mfn(mfn));
 
     clean_and_invalidate_dcache_va_range(v, PAGE_SIZE);
     unmap_domain_page(v);
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 903fa3f..18fe91f 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -206,7 +206,7 @@ static paddr_t __p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
 
         /* Map for next level */
         unmap_domain_page(map);
-        map = map_domain_page(pte.p2m.base);
+        map = map_domain_page(_mfn(pte.p2m.base));
     }
 
     unmap_domain_page(map);
@@ -1078,7 +1078,7 @@ static int apply_p2m_changes(struct domain *d,
                 int i;
                 if ( mappings[level+1] )
                     unmap_domain_page(mappings[level+1]);
-                mappings[level+1] = map_domain_page(entry->p2m.base);
+                mappings[level+1] = map_domain_page(_mfn(entry->p2m.base));
                 cur_offset[level] = offset;
                 /* Any mapping further down is now invalid */
                 for ( i = level+1; i < 4; i++ )
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 258d4c5..dad090f 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -2263,7 +2263,7 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
         printk("Failed TTBR0 maddr lookup\n");
         goto done;
     }
-    first = map_domain_page(paddr>>PAGE_SHIFT);
+    first = map_domain_page(_mfn(paddr_to_mfn(paddr)));
 
     offset = addr >> (12+10);
     printk("1ST[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n",
@@ -2279,7 +2279,7 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
         printk("Failed L1 entry maddr lookup\n");
         goto done;
     }
-    second = map_domain_page(paddr>>PAGE_SHIFT);
+    second = map_domain_page(_mfn(paddr_to_mfn(paddr)));
     offset = (addr >> 12) & 0x3FF;
     printk("2ND[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n",
            offset, paddr, second[offset]);
diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
index 801dcf2..ee41463 100644
--- a/xen/arch/x86/debug.c
+++ b/xen/arch/x86/debug.c
@@ -108,7 +108,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
 
     if ( pgd3val == 0 )
     {
-        l4t = map_domain_page(mfn);
+        l4t = map_domain_page(_mfn(mfn));
         l4e = l4t[l4_table_offset(vaddr)];
         unmap_domain_page(l4t);
         mfn = l4e_get_pfn(l4e);
@@ -120,7 +120,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
             return INVALID_MFN;
         }
 
-        l3t = map_domain_page(mfn);
+        l3t = map_domain_page(_mfn(mfn));
         l3e = l3t[l3_table_offset(vaddr)];
         unmap_domain_page(l3t);
         mfn = l3e_get_pfn(l3e);
@@ -134,7 +134,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
         }
     }
 
-    l2t = map_domain_page(mfn);
+    l2t = map_domain_page(_mfn(mfn));
     l2e = l2t[l2_table_offset(vaddr)];
     unmap_domain_page(l2t);
     mfn = l2e_get_pfn(l2e);
@@ -146,7 +146,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
         DBGP1("l2 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
         return INVALID_MFN;
     }
-    l1t = map_domain_page(mfn);
+    l1t = map_domain_page(_mfn(mfn));
     l1e = l1t[l1_table_offset(vaddr)];
     unmap_domain_page(l1t);
     mfn = l1e_get_pfn(l1e);
@@ -175,7 +175,7 @@ unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr,
         if ( mfn == INVALID_MFN ) 
             break;
 
-        va = map_domain_page(mfn);
+        va = map_domain_page(_mfn(mfn));
         va = va + (addr & (PAGE_SIZE-1));
 
         if ( toaddr )
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 0363650..c73b147 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -868,7 +868,7 @@ int arch_set_info_guest(
                 fail |= xen_pfn_to_cr3(pfn) != c.nat->ctrlreg[1];
             }
         } else {
-            l4_pgentry_t *l4tab = map_domain_page(pfn);
+            l4_pgentry_t *l4tab = map_domain_page(_mfn(pfn));
 
             pfn = l4e_get_pfn(*l4tab);
             unmap_domain_page(l4tab);
@@ -1028,7 +1028,7 @@ int arch_set_info_guest(
     {
         l4_pgentry_t *l4tab;
 
-        l4tab = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+        l4tab = map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
         *l4tab = l4e_from_pfn(page_to_mfn(cr3_page),
             _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
         unmap_domain_page(l4tab);
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index d76707f..3aeebb8 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -630,7 +630,7 @@ static __init void pvh_fixup_page_tables_for_hap(struct vcpu *v,
 
     ASSERT(paging_mode_enabled(v->domain));
 
-    l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+    l4start = map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
 
     /* Clear entries prior to guest L4 start */
     pl4e = l4start + l4_table_offset(v_start);
@@ -746,7 +746,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
                                     unsigned long nr_pages)
 {
     struct page_info *page = NULL;
-    l4_pgentry_t *pl4e, *l4start = map_domain_page(pgtbl_pfn);
+    l4_pgentry_t *pl4e, *l4start = map_domain_page(_mfn(pgtbl_pfn));
     l3_pgentry_t *pl3e = NULL;
     l2_pgentry_t *pl2e = NULL;
     l1_pgentry_t *pl1e = NULL;
@@ -789,7 +789,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
             clear_page(pl3e);
             *pl4e = l4e_from_page(page, L4_PROT);
         } else
-            pl3e = map_domain_page(l4e_get_pfn(*pl4e));
+            pl3e = map_domain_page(_mfn(l4e_get_pfn(*pl4e)));
 
         pl3e += l3_table_offset(vphysmap_start);
         if ( !l3e_get_intpte(*pl3e) )
@@ -816,7 +816,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
             *pl3e = l3e_from_page(page, L3_PROT);
         }
         else
-           pl2e = map_domain_page(l3e_get_pfn(*pl3e));
+            pl2e = map_domain_page(_mfn(l3e_get_pfn(*pl3e)));
 
         pl2e += l2_table_offset(vphysmap_start);
         if ( !l2e_get_intpte(*pl2e) )
@@ -844,7 +844,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
             *pl2e = l2e_from_page(page, L2_PROT);
         }
         else
-            pl1e = map_domain_page(l2e_get_pfn(*pl2e));
+            pl1e = map_domain_page(_mfn(l2e_get_pfn(*pl2e)));
 
         pl1e += l1_table_offset(vphysmap_start);
         BUG_ON(l1e_get_intpte(*pl1e));
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index 0f7548b..d86f8fe 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -66,7 +66,7 @@ void __init mapcache_override_current(struct vcpu *v)
 #define MAPCACHE_L1ENT(idx) \
     __linear_l1_table[l1_linear_offset(MAPCACHE_VIRT_START + pfn_to_paddr(idx))]
 
-void *map_domain_page(unsigned long mfn)
+void *map_domain_page(mfn_t mfn)
 {
     unsigned long flags;
     unsigned int idx, i;
@@ -76,31 +76,31 @@ void *map_domain_page(unsigned long mfn)
     struct vcpu_maphash_entry *hashent;
 
 #ifdef NDEBUG
-    if ( mfn <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
-        return mfn_to_virt(mfn);
+    if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
+        return mfn_to_virt(mfn_x(mfn));
 #endif
 
     v = mapcache_current_vcpu();
     if ( !v || !is_pv_vcpu(v) )
-        return mfn_to_virt(mfn);
+        return mfn_to_virt(mfn_x(mfn));
 
     dcache = &v->domain->arch.pv_domain.mapcache;
     vcache = &v->arch.pv_vcpu.mapcache;
     if ( !dcache->inuse )
-        return mfn_to_virt(mfn);
+        return mfn_to_virt(mfn_x(mfn));
 
     perfc_incr(map_domain_page_count);
 
     local_irq_save(flags);
 
-    hashent = &vcache->hash[MAPHASH_HASHFN(mfn)];
-    if ( hashent->mfn == mfn )
+    hashent = &vcache->hash[MAPHASH_HASHFN(mfn_x(mfn))];
+    if ( hashent->mfn == mfn_x(mfn) )
     {
         idx = hashent->idx;
         ASSERT(idx < dcache->entries);
         hashent->refcnt++;
         ASSERT(hashent->refcnt);
-        ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(idx)) == mfn);
+        ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(idx)) == mfn_x(mfn));
         goto out;
     }
 
@@ -135,7 +135,7 @@ void *map_domain_page(unsigned long mfn)
         else
         {
             /* Replace a hash entry instead. */
-            i = MAPHASH_HASHFN(mfn);
+            i = MAPHASH_HASHFN(mfn_x(mfn));
             do {
                 hashent = &vcache->hash[i];
                 if ( hashent->idx != MAPHASHENT_NOTINUSE && !hashent->refcnt )
@@ -149,7 +149,7 @@ void *map_domain_page(unsigned long mfn)
                 }
                 if ( ++i == MAPHASH_ENTRIES )
                     i = 0;
-            } while ( i != MAPHASH_HASHFN(mfn) );
+            } while ( i != MAPHASH_HASHFN(mfn_x(mfn)) );
         }
         BUG_ON(idx >= dcache->entries);
 
@@ -165,7 +165,7 @@ void *map_domain_page(unsigned long mfn)
 
     spin_unlock(&dcache->lock);
 
-    l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_pfn(mfn, __PAGE_HYPERVISOR_RW));
+    l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_pfn(mfn_x(mfn), __PAGE_HYPERVISOR_RW));
 
  out:
     local_irq_restore(flags);
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index d8ffe2b..967496e 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1293,7 +1293,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
         else
         {
             const l4_pgentry_t *l4e =
-                map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+                map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
 
             c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));
             unmap_domain_page(l4e);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 8a7524f..eef6496 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1175,7 +1175,7 @@ static int alloc_l1_table(struct page_info *page)
     unsigned int   i;
     int            ret = 0;
 
-    pl1e = map_domain_page(pfn);
+    pl1e = map_domain_page(_mfn(pfn));
 
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
     {
@@ -1256,7 +1256,7 @@ static int alloc_l2_table(struct page_info *page, unsigned long type,
     unsigned int   i;
     int            rc = 0;
 
-    pl2e = map_domain_page(pfn);
+    pl2e = map_domain_page(_mfn(pfn));
 
     for ( i = page->nr_validated_ptes; i < L2_PAGETABLE_ENTRIES; i++ )
     {
@@ -1305,7 +1305,7 @@ static int alloc_l3_table(struct page_info *page)
     unsigned int   i;
     int            rc = 0, partial = page->partial_pte;
 
-    pl3e = map_domain_page(pfn);
+    pl3e = map_domain_page(_mfn(pfn));
 
     /*
      * PAE guests allocate full pages, but aren't required to initialize
@@ -1397,7 +1397,7 @@ void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
 
 void fill_ro_mpt(unsigned long mfn)
 {
-    l4_pgentry_t *l4tab = map_domain_page(mfn);
+    l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
 
     l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
         idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
@@ -1406,7 +1406,7 @@ void fill_ro_mpt(unsigned long mfn)
 
 void zap_ro_mpt(unsigned long mfn)
 {
-    l4_pgentry_t *l4tab = map_domain_page(mfn);
+    l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
 
     l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
     unmap_domain_page(l4tab);
@@ -1416,7 +1416,7 @@ static int alloc_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     unsigned long  pfn = page_to_mfn(page);
-    l4_pgentry_t  *pl4e = map_domain_page(pfn);
+    l4_pgentry_t  *pl4e = map_domain_page(_mfn(pfn));
     unsigned int   i;
     int            rc = 0, partial = page->partial_pte;
 
@@ -1472,7 +1472,7 @@ static void free_l1_table(struct page_info *page)
     l1_pgentry_t *pl1e;
     unsigned int  i;
 
-    pl1e = map_domain_page(pfn);
+    pl1e = map_domain_page(_mfn(pfn));
 
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         if ( is_guest_l1_slot(i) )
@@ -1490,7 +1490,7 @@ static int free_l2_table(struct page_info *page, int preemptible)
     unsigned int  i = page->nr_validated_ptes - 1;
     int err = 0;
 
-    pl2e = map_domain_page(pfn);
+    pl2e = map_domain_page(_mfn(pfn));
 
     ASSERT(page->nr_validated_ptes);
     do {
@@ -1519,7 +1519,7 @@ static int free_l3_table(struct page_info *page)
     int rc = 0, partial = page->partial_pte;
     unsigned int  i = page->nr_validated_ptes - !partial;
 
-    pl3e = map_domain_page(pfn);
+    pl3e = map_domain_page(_mfn(pfn));
 
     do {
         if ( is_guest_l3_slot(i) )
@@ -1554,7 +1554,7 @@ static int free_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     unsigned long pfn = page_to_mfn(page);
-    l4_pgentry_t *pl4e = map_domain_page(pfn);
+    l4_pgentry_t *pl4e = map_domain_page(_mfn(pfn));
     int rc = 0, partial = page->partial_pte;
     unsigned int  i = page->nr_validated_ptes - !partial;
 
@@ -2654,7 +2654,7 @@ int vcpu_destroy_pagetables(struct vcpu *v)
 
     if ( is_pv_32on64_vcpu(v) )
     {
-        l4tab = map_domain_page(mfn);
+        l4tab = map_domain_page(_mfn(mfn));
         mfn = l4e_get_pfn(*l4tab);
     }
 
@@ -2710,7 +2710,7 @@ int new_guest_cr3(unsigned long mfn)
     if ( is_pv_32on64_domain(d) )
     {
         unsigned long gt_mfn = pagetable_get_pfn(curr->arch.guest_table);
-        l4_pgentry_t *pl4e = map_domain_page(gt_mfn);
+        l4_pgentry_t *pl4e = map_domain_page(_mfn(gt_mfn));
 
         rc = paging_mode_refcounts(d)
              ? -EINVAL /* Old code was broken, but what should it be? */
@@ -3769,7 +3769,7 @@ static int create_grant_pte_mapping(
     }
     
     mfn = page_to_mfn(page);
-    va = map_domain_page(mfn);
+    va = map_domain_page(_mfn(mfn));
     va = (void *)((unsigned long)va + ((unsigned long)pte_addr & ~PAGE_MASK));
 
     if ( !page_lock(page) )
@@ -3824,7 +3824,7 @@ static int destroy_grant_pte_mapping(
     }
     
     mfn = page_to_mfn(page);
-    va = map_domain_page(mfn);
+    va = map_domain_page(_mfn(mfn));
     va = (void *)((unsigned long)va + ((unsigned long)addr & ~PAGE_MASK));
 
     if ( !page_lock(page) )
@@ -4502,7 +4502,7 @@ long do_update_descriptor(u64 pa, u64 desc)
     paging_mark_dirty(dom, mfn);
 
     /* All is good so make the update. */
-    gdt_pent = map_domain_page(mfn);
+    gdt_pent = map_domain_page(_mfn(mfn));
     write_atomic((uint64_t *)&gdt_pent[offset], *(uint64_t *)&d);
     unmap_domain_page(gdt_pent);
 
@@ -5040,7 +5040,7 @@ static int ptwr_emulated_update(
     adjust_guest_l1e(nl1e, d);
 
     /* Checked successfully: do the update (write or cmpxchg). */
-    pl1e = map_domain_page(mfn);
+    pl1e = map_domain_page(_mfn(mfn));
     pl1e = (l1_pgentry_t *)((unsigned long)pl1e + (addr & ~PAGE_MASK));
     if ( do_cmpxchg )
     {
@@ -5956,7 +5956,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
         l3tab[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR);
     }
     else
-        l2tab = map_domain_page(l3e_get_pfn(l3tab[l3_table_offset(va)]));
+        l2tab = map_domain_page(_mfn(l3e_get_pfn(l3tab[l3_table_offset(va)])));
 
     unmap_domain_page(l3tab);
 
@@ -5998,7 +5998,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
             *pl2e = l2e_from_page(pg, __PAGE_HYPERVISOR);
         }
         else if ( !l1tab )
-            l1tab = map_domain_page(l2e_get_pfn(*pl2e));
+            l1tab = map_domain_page(_mfn(l2e_get_pfn(*pl2e)));
 
         if ( ppg &&
              !(l1e_get_flags(l1tab[l1_table_offset(va)]) & _PAGE_PRESENT) )
@@ -6049,7 +6049,7 @@ void destroy_perdomain_mapping(struct domain *d, unsigned long va,
 
     if ( l3e_get_flags(*pl3e) & _PAGE_PRESENT )
     {
-        const l2_pgentry_t *l2tab = map_domain_page(l3e_get_pfn(*pl3e));
+        const l2_pgentry_t *l2tab = map_domain_page(_mfn(l3e_get_pfn(*pl3e)));
         const l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
         unsigned int i = l1_table_offset(va);
 
@@ -6057,7 +6057,7 @@ void destroy_perdomain_mapping(struct domain *d, unsigned long va,
         {
             if ( l2e_get_flags(*pl2e) & _PAGE_PRESENT )
             {
-                l1_pgentry_t *l1tab = map_domain_page(l2e_get_pfn(*pl2e));
+                l1_pgentry_t *l1tab = map_domain_page(_mfn(l2e_get_pfn(*pl2e)));
 
                 for ( ; nr && i < L1_PAGETABLE_ENTRIES; --nr, ++i )
                 {
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 9c6c74f..30a653d 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -121,7 +121,7 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
     *mfn = _mfn(page_to_mfn(page));
     ASSERT(mfn_valid(mfn_x(*mfn)));
 
-    map = map_domain_page(mfn_x(*mfn));
+    map = map_domain_page(*mfn);
     return map;
 }
 
diff --git a/xen/arch/x86/mm/hap/guest_walk.c b/xen/arch/x86/mm/hap/guest_walk.c
index 381a196..62ab454 100644
--- a/xen/arch/x86/mm/hap/guest_walk.c
+++ b/xen/arch/x86/mm/hap/guest_walk.c
@@ -87,7 +87,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
 
     /* Map the top-level table and call the tree-walker */
     ASSERT(mfn_valid(mfn_x(top_mfn)));
-    top_map = map_domain_page(mfn_x(top_mfn));
+    top_map = map_domain_page(top_mfn);
 #if GUEST_PAGING_LEVELS == 3
     top_map += (cr3 & ~(PAGE_MASK | 31));
 #endif
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 0700f00..2f6e9e5 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1210,8 +1210,8 @@ int __mem_sharing_unshare_page(struct domain *d,
         return -ENOMEM;
     }
 
-    s = map_domain_page(__page_to_mfn(old_page));
-    t = map_domain_page(__page_to_mfn(page));
+    s = map_domain_page(_mfn(__page_to_mfn(old_page)));
+    t = map_domain_page(_mfn(__page_to_mfn(page)));
     memcpy(t, s, PAGE_SIZE);
     unmap_domain_page(s);
     unmap_domain_page(t);
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 5133eb6..eed3c34 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -245,7 +245,7 @@ static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int l
 
     if ( level > 1 )
     {
-        ept_entry_t *epte = map_domain_page(ept_entry->mfn);
+        ept_entry_t *epte = map_domain_page(_mfn(ept_entry->mfn));
         for ( int i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
             ept_free_entry(p2m, epte + i, level - 1);
         unmap_domain_page(epte);
@@ -270,7 +270,7 @@ static int ept_split_super_page(struct p2m_domain *p2m, ept_entry_t *ept_entry,
     if ( !ept_set_middle_entry(p2m, &new_ept) )
         return 0;
 
-    table = map_domain_page(new_ept.mfn);
+    table = map_domain_page(_mfn(new_ept.mfn));
     trunk = 1UL << ((level - 1) * EPT_TABLE_ORDER);
 
     for ( int i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
@@ -358,7 +358,7 @@ static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
 
     mfn = e.mfn;
     unmap_domain_page(*table);
-    *table = map_domain_page(mfn);
+    *table = map_domain_page(_mfn(mfn));
     *gfn_remainder &= (1UL << shift) - 1;
     return GUEST_TABLE_NORMAL_PAGE;
 }
@@ -371,7 +371,7 @@ static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
 static bool_t ept_invalidate_emt(mfn_t mfn, bool_t recalc, int level)
 {
     int rc;
-    ept_entry_t *epte = map_domain_page(mfn_x(mfn));
+    ept_entry_t *epte = map_domain_page(mfn);
     unsigned int i;
     bool_t changed = 0;
 
@@ -413,7 +413,7 @@ static int ept_invalidate_emt_range(struct p2m_domain *p2m,
     unsigned int i, index;
     int wrc, rc = 0, ret = GUEST_TABLE_MAP_FAILED;
 
-    table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+    table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
     for ( i = ept_get_wl(&p2m->ept); i > target; --i )
     {
         ret = ept_next_level(p2m, 1, &table, &gfn_remainder, i);
@@ -497,7 +497,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
         ept_entry_t e;
         unsigned int i;
 
-        epte = map_domain_page(mfn);
+        epte = map_domain_page(_mfn(mfn));
         i = (gfn >> (level * EPT_TABLE_ORDER)) & (EPT_PAGETABLE_ENTRIES - 1);
         e = atomic_read_ept_entry(&epte[i]);
 
@@ -688,7 +688,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
            (target == 0));
     ASSERT(!p2m_is_foreign(p2mt) || target == 0);
 
-    table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+    table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
 
     ret = GUEST_TABLE_MAP_FAILED;
     for ( i = ept_get_wl(ept); i > target; i-- )
@@ -839,7 +839,7 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m,
                            unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
                            p2m_query_t q, unsigned int *page_order)
 {
-    ept_entry_t *table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+    ept_entry_t *table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
     unsigned long gfn_remainder = gfn;
     ept_entry_t *ept_entry;
     u32 index;
@@ -943,7 +943,7 @@ void ept_walk_table(struct domain *d, unsigned long gfn)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     struct ept_data *ept = &p2m->ept;
-    ept_entry_t *table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+    ept_entry_t *table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
     unsigned long gfn_remainder = gfn;
 
     int i;
@@ -976,7 +976,7 @@ void ept_walk_table(struct domain *d, unsigned long gfn)
         {
             gfn_remainder &= (1UL << (i*EPT_TABLE_ORDER)) - 1;
 
-            next = map_domain_page(ept_entry->mfn);
+            next = map_domain_page(_mfn(ept_entry->mfn));
 
             unmap_domain_page(table);
 
@@ -1184,7 +1184,7 @@ static void ept_dump_p2m_table(unsigned char key)
             char c = 0;
 
             gfn_remainder = gfn;
-            table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+            table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
 
             for ( i = ept_get_wl(ept); i > 0; i-- )
             {
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 0679f00..6e27bcd 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -109,7 +109,7 @@ p2m_pod_cache_add(struct p2m_domain *p2m,
      */
     for ( i = 0; i < (1 << order); i++ )
     {
-        char *b = map_domain_page(mfn_x(page_to_mfn(page)) + i);
+        char *b = map_domain_page(_mfn(mfn_x(page_to_mfn(page)) + i));
         clear_page(b);
         unmap_domain_page(b);
     }
@@ -710,7 +710,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     for ( i=0; i<SUPERPAGE_PAGES; i++ )
     {
         /* Quick zero-check */
-        map = map_domain_page(mfn_x(mfn0) + i);
+        map = map_domain_page(_mfn(mfn_x(mfn0) + i));
 
         for ( j=0; j<16; j++ )
             if( *(map+j) != 0 )
@@ -743,7 +743,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     /* Finally, do a full zero-check */
     for ( i=0; i < SUPERPAGE_PAGES; i++ )
     {
-        map = map_domain_page(mfn_x(mfn0) + i);
+        map = map_domain_page(_mfn(mfn_x(mfn0) + i));
 
         for ( j=0; j<PAGE_SIZE/sizeof(*map); j++ )
             if( *(map+j) != 0 )
@@ -815,7 +815,7 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
              && ( (mfn_to_page(mfns[i])->count_info & PGC_allocated) != 0 ) 
              && ( (mfn_to_page(mfns[i])->count_info & (PGC_page_table|PGC_xen_heap)) == 0 ) 
              && ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) <= max_ref ) )
-            map[i] = map_domain_page(mfn_x(mfns[i]));
+            map[i] = map_domain_page(mfns[i]);
         else
             map[i] = NULL;
     }
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index e50b6fa..a6dd464 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -146,7 +146,7 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
 
     if ( page_order > PAGE_ORDER_2M )
     {
-        l1_pgentry_t *l3_table = map_domain_page(l1e_get_pfn(*p2m_entry));
+        l1_pgentry_t *l3_table = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
         for ( int i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
             p2m_free_entry(p2m, l3_table + i, page_order - 9);
         unmap_domain_page(l3_table);
@@ -280,7 +280,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
     }
 
-    next = map_domain_page(l1e_get_pfn(*p2m_entry));
+    next = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
     if ( unmap )
         unmap_domain_page(*table);
     *table = next;
@@ -304,7 +304,7 @@ static int p2m_pt_set_recalc_range(struct p2m_domain *p2m,
     l1_pgentry_t *pent, *plast;
     int err = 0;
 
-    table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+    table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
     for ( i = 4; i-- > level; )
     {
         remainder = gfn_remainder;
@@ -366,7 +366,7 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
     l1_pgentry_t *pent;
     int err = 0;
 
-    table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+    table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
     while ( --level )
     {
         unsigned long remainder = gfn_remainder;
@@ -524,7 +524,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     if ( rc < 0 )
         return rc;
 
-    table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+    table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
     rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                         L4_PAGETABLE_SHIFT - PAGE_SHIFT,
                         L4_PAGETABLE_ENTRIES, PGT_l3_page_table, 1);
@@ -716,7 +716,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
     mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
 
     {
-        l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
+        l4_pgentry_t *l4e = map_domain_page(mfn);
         l4e += l4_table_offset(addr);
         if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
         {
@@ -728,7 +728,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
         unmap_domain_page(l4e);
     }
     {
-        l3_pgentry_t *l3e = map_domain_page(mfn_x(mfn));
+        l3_pgentry_t *l3e = map_domain_page(mfn);
         l3e += l3_table_offset(addr);
 pod_retry_l3:
         flags = l3e_get_flags(*l3e);
@@ -769,7 +769,7 @@ pod_retry_l3:
         unmap_domain_page(l3e);
     }
 
-    l2e = map_domain_page(mfn_x(mfn));
+    l2e = map_domain_page(mfn);
     l2e += l2_table_offset(addr);
 
 pod_retry_l2:
@@ -807,7 +807,7 @@ pod_retry_l2:
         recalc = 1;
     unmap_domain_page(l2e);
 
-    l1e = map_domain_page(mfn_x(mfn));
+    l1e = map_domain_page(mfn);
     l1e += l1_table_offset(addr);
 pod_retry_l1:
     flags = l1e_get_flags(*l1e);
@@ -849,7 +849,7 @@ static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m,
 
     ASSERT(hap_enabled(p2m->domain));
 
-    tab = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+    tab = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
     for ( changed = i = 0; i < (1 << PAGETABLE_ORDER); ++i )
     {
         l1_pgentry_t e = tab[i];
@@ -929,7 +929,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
         l4_pgentry_t *l4e;
         l3_pgentry_t *l3e;
         int i4, i3;
-        l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+        l4e = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
 
         gfn = 0;
         for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
@@ -939,7 +939,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
                 gfn += 1 << (L4_PAGETABLE_SHIFT - PAGE_SHIFT);
                 continue;
             }
-            l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
+            l3e = map_domain_page(_mfn(l4e_get_pfn(l4e[i4])));
             for ( i3 = 0;
                   i3 < L3_PAGETABLE_ENTRIES;
                   i3++ )
@@ -974,7 +974,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
                     }
                 }
 
-                l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
+                l2e = map_domain_page(_mfn(l3e_get_pfn(l3e[i3])));
                 for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
                 {
                     if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
@@ -1010,7 +1010,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
                         continue;
                     }
 
-                    l1e = map_domain_page(l2e_get_pfn(l2e[i2]));
+                    l1e = map_domain_page(_mfn(l2e_get_pfn(l2e[i2])));
 
                     for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
                     {
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 1fd1194..63c5a23 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1285,7 +1285,7 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer)
         int rc;
 
         ASSERT( mfn_valid(mfn) );
-        guest_map = map_domain_page(mfn_x(mfn));
+        guest_map = map_domain_page(mfn);
         rc = copy_from_user(guest_map, user_ptr, PAGE_SIZE);
         unmap_domain_page(guest_map);
         if ( rc )
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 59d4720..7089155 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -81,7 +81,7 @@ static mfn_t paging_new_log_dirty_leaf(struct domain *d)
     mfn_t mfn = paging_new_log_dirty_page(d);
     if ( mfn_valid(mfn) )
     {
-        void *leaf = map_domain_page(mfn_x(mfn));
+        void *leaf = map_domain_page(mfn);
         clear_page(leaf);
         unmap_domain_page(leaf);
     }
@@ -95,7 +95,7 @@ static mfn_t paging_new_log_dirty_node(struct domain *d)
     if ( mfn_valid(mfn) )
     {
         int i;
-        mfn_t *node = map_domain_page(mfn_x(mfn));
+        mfn_t *node = map_domain_page(mfn);
         for ( i = 0; i < LOGDIRTY_NODE_ENTRIES; i++ )
             node[i] = _mfn(INVALID_MFN);
         unmap_domain_page(node);
@@ -107,7 +107,7 @@ static mfn_t paging_new_log_dirty_node(struct domain *d)
 static mfn_t *paging_map_log_dirty_bitmap(struct domain *d)
 {
     if ( likely(mfn_valid(d->arch.paging.log_dirty.top)) )
-        return map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
+        return map_domain_page(d->arch.paging.log_dirty.top);
     return NULL;
 }
 
@@ -144,7 +144,7 @@ static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
         return -EBUSY;
     }
 
-    l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
+    l4 = map_domain_page(d->arch.paging.log_dirty.top);
     i4 = d->arch.paging.preempt.log_dirty.i4;
     i3 = d->arch.paging.preempt.log_dirty.i3;
     rc = 0;
@@ -154,14 +154,14 @@ static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
         if ( !mfn_valid(l4[i4]) )
             continue;
 
-        l3 = map_domain_page(mfn_x(l4[i4]));
+        l3 = map_domain_page(l4[i4]);
 
         for ( ; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
         {
             if ( !mfn_valid(l3[i3]) )
                 continue;
 
-            l2 = map_domain_page(mfn_x(l3[i3]));
+            l2 = map_domain_page(l3[i3]);
 
             for ( i2 = 0; i2 < LOGDIRTY_NODE_ENTRIES; i2++ )
                 if ( mfn_valid(l2[i2]) )
@@ -311,7 +311,7 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
     if ( !mfn_valid(mfn) )
         goto out;
 
-    l3 = map_domain_page(mfn_x(mfn));
+    l3 = map_domain_page(mfn);
     mfn = l3[i3];
     if ( !mfn_valid(mfn) )
         l3[i3] = mfn = paging_new_log_dirty_node(d);
@@ -319,7 +319,7 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
     if ( !mfn_valid(mfn) )
         goto out;
 
-    l2 = map_domain_page(mfn_x(mfn));
+    l2 = map_domain_page(mfn);
     mfn = l2[i2];
     if ( !mfn_valid(mfn) )
         l2[i2] = mfn = paging_new_log_dirty_leaf(d);
@@ -327,7 +327,7 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
     if ( !mfn_valid(mfn) )
         goto out;
 
-    l1 = map_domain_page(mfn_x(mfn));
+    l1 = map_domain_page(mfn);
     changed = !__test_and_set_bit(i1, l1);
     unmap_domain_page(l1);
     if ( changed )
@@ -384,25 +384,25 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
     if ( !mfn_valid(mfn) )
         return 0;
 
-    l4 = map_domain_page(mfn_x(mfn));
+    l4 = map_domain_page(mfn);
     mfn = l4[L4_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l4);
     if ( !mfn_valid(mfn) )
         return 0;
 
-    l3 = map_domain_page(mfn_x(mfn));
+    l3 = map_domain_page(mfn);
     mfn = l3[L3_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l3);
     if ( !mfn_valid(mfn) )
         return 0;
 
-    l2 = map_domain_page(mfn_x(mfn));
+    l2 = map_domain_page(mfn);
     mfn = l2[L2_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l2);
     if ( !mfn_valid(mfn) )
         return 0;
 
-    l1 = map_domain_page(mfn_x(mfn));
+    l1 = map_domain_page(mfn);
     rv = test_bit(L1_LOGDIRTY_IDX(pfn), l1);
     unmap_domain_page(l1);
     return rv;
@@ -476,18 +476,18 @@ static int paging_log_dirty_op(struct domain *d,
 
     for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
     {
-        l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
+        l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(l4[i4]) : NULL;
         for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
         {
             l2 = ((l3 && mfn_valid(l3[i3])) ?
-                  map_domain_page(mfn_x(l3[i3])) : NULL);
+                  map_domain_page(l3[i3]) : NULL);
             for ( i2 = 0;
                   (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
                   i2++ )
             {
                 unsigned int bytes = PAGE_SIZE;
                 l1 = ((l2 && mfn_valid(l2[i2])) ?
-                      map_domain_page(mfn_x(l2[i2])) : NULL);
+                      map_domain_page(l2[i2]) : NULL);
                 if ( unlikely(((sc->pages - pages + 7) >> 3) < bytes) )
                     bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
                 if ( likely(peek) )
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index c7e0e54..da6b847 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3393,7 +3393,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
             if ( (l1e_get_flags(new) & _PAGE_PRESENT)
                  && !(l1e_get_flags(new) & _PAGE_PSE)
                  && mfn_valid(nmfn) )
-                npte = map_domain_page(mfn_x(nmfn));
+                npte = map_domain_page(nmfn);
 
             for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
             {
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 54d0bd3..2e3d3f6 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -256,7 +256,7 @@ shadow_check_gl1e(struct vcpu *v, walk_t *gw)
         return 0;
 
     /* Can't just pull-through because mfn may have changed */
-    l1p = map_domain_page(mfn_x(gw->l1mfn));
+    l1p = map_domain_page(gw->l1mfn);
     nl1e.l1 = l1p[guest_l1_table_offset(gw->va)].l1;
     unmap_domain_page(l1p);
 
@@ -384,7 +384,7 @@ sh_guest_map_l1e(struct vcpu *v, unsigned long addr,
     {
         if ( gl1mfn )
             *gl1mfn = mfn_x(gw.l1mfn);
-        pl1e = map_domain_page(mfn_x(gw.l1mfn)) +
+        pl1e = map_domain_page(gw.l1mfn) +
             (guest_l1_table_offset(addr) * sizeof(guest_l1e_t));
     }
 
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index eff39dc..31b36ef 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -508,7 +508,7 @@ sh_mfn_is_a_page_table(mfn_t gmfn)
 static inline void *
 sh_map_domain_page(mfn_t mfn)
 {
-    return map_domain_page(mfn_x(mfn));
+    return map_domain_page(mfn);
 }
 
 static inline void
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 2289284..3cad8d3 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -657,7 +657,7 @@ static void cpu_smpboot_free(unsigned int cpu)
     if ( per_cpu(stubs.addr, cpu) )
     {
         unsigned long mfn = per_cpu(stubs.mfn, cpu);
-        unsigned char *stub_page = map_domain_page(mfn);
+        unsigned char *stub_page = map_domain_page(_mfn(mfn));
         unsigned int i;
 
         memset(stub_page + STUB_BUF_CPU_OFFS(cpu), 0xcc, STUB_BUF_SIZE);
diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
index 01b9530..7b41be0 100644
--- a/xen/arch/x86/tboot.c
+++ b/xen/arch/x86/tboot.c
@@ -161,7 +161,7 @@ static void update_iommu_mac(vmac_ctx_t *ctx, uint64_t pt_maddr, int level)
     if ( pt_maddr == 0 )
         return;
 
-    pt_vaddr = (struct dma_pte *)map_domain_page(pt_maddr >> PAGE_SHIFT_4K);
+    pt_vaddr = (struct dma_pte *)map_domain_page(_mfn(paddr_to_mfn(pt_maddr)));
     vmac_update((void *)pt_vaddr, PAGE_SIZE, ctx);
 
     for ( i = 0; i < PTE_NUM; i++ )
@@ -194,7 +194,7 @@ static void update_pagetable_mac(vmac_ctx_t *ctx)
         {
             if ( page->count_info & PGC_page_table )
             {
-                void *pg = map_domain_page(mfn);
+                void *pg = map_domain_page(_mfn(mfn));
                 vmac_update(pg, PAGE_SIZE, ctx);
                 unmap_domain_page(pg);
             }
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index c634008..bc3b055 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1319,7 +1319,7 @@ static enum pf_type __page_fault_type(
 
     mfn = cr3 >> PAGE_SHIFT;
 
-    l4t = map_domain_page(mfn);
+    l4t = map_domain_page(_mfn(mfn));
     l4e = l4e_read_atomic(&l4t[l4_table_offset(addr)]);
     mfn = l4e_get_pfn(l4e);
     unmap_domain_page(l4t);
@@ -1328,7 +1328,7 @@ static enum pf_type __page_fault_type(
         return real_fault;
     page_user &= l4e_get_flags(l4e);
 
-    l3t  = map_domain_page(mfn);
+    l3t  = map_domain_page(_mfn(mfn));
     l3e = l3e_read_atomic(&l3t[l3_table_offset(addr)]);
     mfn = l3e_get_pfn(l3e);
     unmap_domain_page(l3t);
@@ -1339,7 +1339,7 @@ static enum pf_type __page_fault_type(
     if ( l3e_get_flags(l3e) & _PAGE_PSE )
         goto leaf;
 
-    l2t = map_domain_page(mfn);
+    l2t = map_domain_page(_mfn(mfn));
     l2e = l2e_read_atomic(&l2t[l2_table_offset(addr)]);
     mfn = l2e_get_pfn(l2e);
     unmap_domain_page(l2t);
@@ -1350,7 +1350,7 @@ static enum pf_type __page_fault_type(
     if ( l2e_get_flags(l2e) & _PAGE_PSE )
         goto leaf;
 
-    l1t = map_domain_page(mfn);
+    l1t = map_domain_page(_mfn(mfn));
     l1e = l1e_read_atomic(&l1t[l1_table_offset(addr)]);
     mfn = l1e_get_pfn(l1e);
     unmap_domain_page(l1t);
@@ -2194,7 +2194,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
      * context. This is needed for some systems which (ab)use IN/OUT
      * to communicate with BIOS code in system-management mode.
      */
-    io_emul_stub = map_domain_page(this_cpu(stubs.mfn)) +
+    io_emul_stub = map_domain_page(_mfn(this_cpu(stubs.mfn))) +
                    (this_cpu(stubs.addr) & ~PAGE_MASK) +
                    STUB_BUF_SIZE / 2;
     /* movq $host_to_guest_gpr_switch,%rcx */
@@ -2390,7 +2390,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             else
             {
                 l4_pgentry_t *pl4e =
-                    map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+                    map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
 
                 mfn = l4e_get_pfn(*pl4e);
                 unmap_domain_page(pl4e);
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 3ef4618..864a851 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -59,7 +59,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
     if ( !is_pv_vcpu(v) || !is_canonical_address(addr) )
         return NULL;
 
-    l4t = map_domain_page(mfn);
+    l4t = map_domain_page(_mfn(mfn));
     l4e = l4t[l4_table_offset(addr)];
     unmap_domain_page(l4t);
     if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
@@ -77,7 +77,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
         goto ret;
     }
 
-    l2t = map_domain_page(mfn);
+    l2t = map_domain_page(_mfn(mfn));
     l2e = l2t[l2_table_offset(addr)];
     unmap_domain_page(l2t);
     mfn = l2e_get_pfn(l2e);
@@ -89,7 +89,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
         goto ret;
     }
 
-    l1t = map_domain_page(mfn);
+    l1t = map_domain_page(_mfn(mfn));
     l1e = l1t[l1_table_offset(addr)];
     unmap_domain_page(l1t);
     mfn = l1e_get_pfn(l1e);
@@ -97,7 +97,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
         return NULL;
 
  ret:
-    return map_domain_page(mfn) + (addr & ~PAGE_MASK);
+    return map_domain_page(_mfn(mfn)) + (addr & ~PAGE_MASK);
 }
 
 /*
@@ -1197,7 +1197,7 @@ int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
 
     mfn = (read_cr3()) >> PAGE_SHIFT;
 
-    pl4e = map_domain_page(mfn);
+    pl4e = map_domain_page(_mfn(mfn));
 
     l4e = pl4e[0];
 
@@ -1206,7 +1206,7 @@ int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
 
     mfn = l4e_get_pfn(l4e);
     /* We don't need get page type here since it is current CR3 */
-    pl3e = map_domain_page(mfn);
+    pl3e = map_domain_page(_mfn(mfn));
 
     l3e = pl3e[3];
 
@@ -1214,7 +1214,7 @@ int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
         goto unmap;
 
     mfn = l3e_get_pfn(l3e);
-    pl2e = map_domain_page(mfn);
+    pl2e = map_domain_page(_mfn(mfn));
 
     l2e = pl2e[l2_table_offset(addr)];
 
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 117a133..c0d5016 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -175,7 +175,7 @@ void show_page_walk(unsigned long addr)
     if ( !is_canonical_address(addr) )
         return;
 
-    l4t = map_domain_page(mfn);
+    l4t = map_domain_page(_mfn(mfn));
     l4e = l4t[l4_table_offset(addr)];
     unmap_domain_page(l4t);
     mfn = l4e_get_pfn(l4e);
@@ -187,7 +187,7 @@ void show_page_walk(unsigned long addr)
          !mfn_valid(mfn) )
         return;
 
-    l3t = map_domain_page(mfn);
+    l3t = map_domain_page(_mfn(mfn));
     l3e = l3t[l3_table_offset(addr)];
     unmap_domain_page(l3t);
     mfn = l3e_get_pfn(l3e);
@@ -201,7 +201,7 @@ void show_page_walk(unsigned long addr)
          !mfn_valid(mfn) )
         return;
 
-    l2t = map_domain_page(mfn);
+    l2t = map_domain_page(_mfn(mfn));
     l2e = l2t[l2_table_offset(addr)];
     unmap_domain_page(l2t);
     mfn = l2e_get_pfn(l2e);
@@ -215,7 +215,7 @@ void show_page_walk(unsigned long addr)
          !mfn_valid(mfn) )
         return;
 
-    l1t = map_domain_page(mfn);
+    l1t = map_domain_page(_mfn(mfn));
     l1e = l1t[l1_table_offset(addr)];
     unmap_domain_page(l1t);
     mfn = l1e_get_pfn(l1e);
@@ -381,7 +381,7 @@ void __devinit subarch_percpu_traps_init(void)
     /* IST_MAX IST pages + 1 syscall page + 1 guard page + primary stack. */
     BUILD_BUG_ON((IST_MAX + 2) * PAGE_SIZE + PRIMARY_STACK_SIZE > STACK_SIZE);
 
-    stub_page = map_domain_page(this_cpu(stubs.mfn));
+    stub_page = map_domain_page(_mfn(this_cpu(stubs.mfn)));
 
     /* Trampoline for SYSCALL entry from 64-bit mode. */
     wrmsrl(MSR_LSTAR, stub_va);
diff --git a/xen/arch/x86/x86_emulate.c b/xen/arch/x86/x86_emulate.c
index 51c8e44..28132b5 100644
--- a/xen/arch/x86/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate.c
@@ -23,11 +23,11 @@
 #define cpu_has_amd_erratum(nr) \
         cpu_has_amd_erratum(&current_cpu_data, AMD_ERRATUM_##nr)
 
-#define get_stub(stb) ({                                   \
-    BUILD_BUG_ON(STUB_BUF_SIZE / 2 < MAX_INST_LEN + 1);    \
-    (stb).addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2; \
-    ((stb).ptr = map_domain_page(this_cpu(stubs.mfn))) +   \
-        ((stb).addr & ~PAGE_MASK);                         \
+#define get_stub(stb) ({                                        \
+    BUILD_BUG_ON(STUB_BUF_SIZE / 2 < MAX_INST_LEN + 1);         \
+    (stb).addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2;      \
+    ((stb).ptr = map_domain_page(_mfn(this_cpu(stubs.mfn)))) +  \
+        ((stb).addr & ~PAGE_MASK);                              \
 })
 #define put_stub(stb) ({                                   \
     if ( (stb).ptr )                                       \
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 9786ecd..2543a04 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -2311,7 +2311,7 @@ static int gnttab_copy_claim_buf(const struct gnttab_copy *op,
         buf->have_type = 1;
     }
 
-    buf->virt = map_domain_page(buf->frame);
+    buf->virt = map_domain_page(_mfn(buf->frame));
     rc = GNTST_okay;
 
  out:
@@ -2797,7 +2797,7 @@ static int __gnttab_cache_flush(gnttab_cache_flush_t *cflush,
         }
     }
 
-    v = map_domain_page(mfn);
+    v = map_domain_page(_mfn(mfn));
     v += cflush->offset;
 
     if ( (cflush->op & GNTTAB_CACHE_INVAL) && (cflush->op & GNTTAB_CACHE_CLEAN) )
diff --git a/xen/common/kexec.c b/xen/common/kexec.c
index 1e4a667..4dda007 100644
--- a/xen/common/kexec.c
+++ b/xen/common/kexec.c
@@ -912,7 +912,7 @@ static int kexec_segments_from_ind_page(unsigned long mfn,
     kimage_entry_t *entry;
     int ret = 0;
 
-    page = map_domain_page(mfn);
+    page = map_domain_page(_mfn(mfn));
 
     /*
      * Walk the indirection page list, adding destination pages to the
@@ -934,7 +934,7 @@ static int kexec_segments_from_ind_page(unsigned long mfn,
             break;
         case IND_INDIRECTION:
             unmap_domain_page(page);
-            entry = page = map_domain_page(mfn);
+            entry = page = map_domain_page(_mfn(mfn));
             continue;
         case IND_DONE:
             goto done;
diff --git a/xen/common/kimage.c b/xen/common/kimage.c
index 742e4e8..132dd41 100644
--- a/xen/common/kimage.c
+++ b/xen/common/kimage.c
@@ -495,10 +495,10 @@ static void kimage_terminate(struct kexec_image *image)
  * Call unmap_domain_page(ptr) after the loop exits.
  */
 #define for_each_kimage_entry(image, ptr, entry)                        \
-    for ( ptr = map_domain_page(image->head >> PAGE_SHIFT);             \
+    for ( ptr = map_domain_page(_mfn(paddr_to_mfn(image->head)));       \
           (entry = *ptr) && !(entry & IND_DONE);                        \
           ptr = (entry & IND_INDIRECTION) ?                             \
-              (unmap_domain_page(ptr), map_domain_page(entry >> PAGE_SHIFT)) \
+              (unmap_domain_page(ptr), map_domain_page(_mfn(paddr_to_mfn(entry)))) \
               : ptr + 1 )
 
 static void kimage_free_entry(kimage_entry_t entry)
@@ -748,7 +748,7 @@ static int kimage_load_crash_segment(struct kexec_image *image,
         dchunk = PAGE_SIZE;
         schunk = min(dchunk, sbytes);
 
-        dest_va = map_domain_page(dest_mfn);
+        dest_va = map_domain_page(_mfn(dest_mfn));
         if ( !dest_va )
             return -EINVAL;
 
@@ -866,7 +866,7 @@ int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
     int ret = 0;
     paddr_t dest = KIMAGE_NO_DEST;
 
-    page = map_domain_page(ind_mfn);
+    page = map_domain_page(_mfn(ind_mfn));
     if ( !page )
         return -ENOMEM;
 
@@ -892,7 +892,7 @@ int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
             break;
         case IND_INDIRECTION:
             unmap_domain_page(page);
-            page = map_domain_page(mfn);
+            page = map_domain_page(_mfn(mfn));
             entry = page;
             continue;
         case IND_DONE:
diff --git a/xen/common/memory.c b/xen/common/memory.c
index ae4c32e..e5d49d8 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1172,7 +1172,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 
 void clear_domain_page(mfn_t mfn)
 {
-    void *ptr = map_domain_page(mfn_x(mfn));
+    void *ptr = map_domain_page(mfn);
 
     clear_page(ptr);
     unmap_domain_page(ptr);
@@ -1180,8 +1180,8 @@ void clear_domain_page(mfn_t mfn)
 
 void copy_domain_page(mfn_t dest, mfn_t source)
 {
-    const void *src = map_domain_page(mfn_x(source));
-    void *dst = map_domain_page(mfn_x(dest));
+    const void *src = map_domain_page(source);
+    void *dst = map_domain_page(dest);
 
     copy_page(dst, src);
     unmap_domain_page(dst);
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 5ef131b..71cb7d5 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -77,7 +77,7 @@ static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
 
     *pcli_mfn = page_to_mfn(page);
     *pcli_pfp = page;
-    return map_domain_page(*pcli_mfn);
+    return map_domain_page(_mfn(*pcli_mfn));
 }
 
 static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
@@ -104,7 +104,7 @@ int tmem_copy_from_client(struct page_info *pfp,
 
     ASSERT(pfp != NULL);
     tmem_mfn = page_to_mfn(pfp);
-    tmem_va = map_domain_page(tmem_mfn);
+    tmem_va = map_domain_page(_mfn(tmem_mfn));
     if ( guest_handle_is_null(clibuf) )
     {
         cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 0);
@@ -174,7 +174,7 @@ int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp,
             return -EFAULT;
     }
     tmem_mfn = page_to_mfn(pfp);
-    tmem_va = map_domain_page(tmem_mfn);
+    tmem_va = map_domain_page(_mfn(tmem_mfn));
     if ( cli_va )
     {
         memcpy(cli_va, tmem_va, PAGE_SIZE);
diff --git a/xen/drivers/passthrough/amd/iommu_guest.c b/xen/drivers/passthrough/amd/iommu_guest.c
index 98e7b38..0ac0ed3 100644
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -203,7 +203,7 @@ void guest_iommu_add_ppr_log(struct domain *d, u32 entry[])
                                     sizeof(ppr_entry_t), tail);
     ASSERT(mfn_valid(mfn));
 
-    log_base = map_domain_page(mfn);
+    log_base = map_domain_page(_mfn(mfn));
     log = log_base + tail % (PAGE_SIZE / sizeof(ppr_entry_t));
 
     /* Convert physical device id back into virtual device id */
@@ -252,7 +252,7 @@ void guest_iommu_add_event_log(struct domain *d, u32 entry[])
                                     sizeof(event_entry_t), tail);
     ASSERT(mfn_valid(mfn));
 
-    log_base = map_domain_page(mfn);
+    log_base = map_domain_page(_mfn(mfn));
     log = log_base + tail % (PAGE_SIZE / sizeof(event_entry_t));
 
     /* re-write physical device id into virtual device id */
@@ -377,7 +377,7 @@ static int do_completion_wait(struct domain *d, cmd_entry_t *cmd)
         gaddr_64 = (gaddr_hi << 32) | (gaddr_lo << 3);
 
         gfn = gaddr_64 >> PAGE_SHIFT;
-        vaddr = map_domain_page(mfn_x(get_gfn(d, gfn ,&p2mt)));
+        vaddr = map_domain_page(get_gfn(d, gfn ,&p2mt));
         put_gfn(d, gfn);
 
         write_u64_atomic((uint64_t *)(vaddr + (gaddr_64 & (PAGE_SIZE-1))),
@@ -425,7 +425,7 @@ static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd)
     ASSERT(mfn_valid(dte_mfn));
 
     /* Read guest dte information */
-    dte_base = map_domain_page(dte_mfn);
+    dte_base = map_domain_page(_mfn(dte_mfn));
 
     gdte = dte_base + gbdf % (PAGE_SIZE / sizeof(dev_entry_t));
 
@@ -506,7 +506,7 @@ static void guest_iommu_process_command(unsigned long _d)
                                             sizeof(cmd_entry_t), head);
         ASSERT(mfn_valid(cmd_mfn));
 
-        cmd_base = map_domain_page(cmd_mfn);
+        cmd_base = map_domain_page(_mfn(cmd_mfn));
         cmd = cmd_base + head % entries_per_page;
 
         opcode = get_field_from_reg_u32(cmd->data[1],
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 64c5225..3dfab6d 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -42,7 +42,7 @@ void clear_iommu_pte_present(unsigned long l1_mfn, unsigned long gfn)
 {
     u64 *table, *pte;
 
-    table = map_domain_page(l1_mfn);
+    table = map_domain_page(_mfn(l1_mfn));
     pte = table + pfn_to_pde_idx(gfn, IOMMU_PAGING_MODE_LEVEL_1);
     *pte = 0;
     unmap_domain_page(table);
@@ -115,7 +115,7 @@ static bool_t set_iommu_pte_present(unsigned long pt_mfn, unsigned long gfn,
     u32 *pde;
     bool_t need_flush = 0;
 
-    table = map_domain_page(pt_mfn);
+    table = map_domain_page(_mfn(pt_mfn));
 
     pde = (u32*)(table + pfn_to_pde_idx(gfn, pde_level));
 
@@ -349,12 +349,12 @@ static int iommu_update_pde_count(struct domain *d, unsigned long pt_mfn,
     next_level = merge_level - 1;
 
     /* get pde at merge level */
-    table = map_domain_page(pt_mfn);
+    table = map_domain_page(_mfn(pt_mfn));
     pde = table + pfn_to_pde_idx(gfn, merge_level);
 
     /* get page table of next level */
     ntable_maddr = amd_iommu_get_next_table_from_pte((u32*)pde);
-    ntable = map_domain_page(ntable_maddr >> PAGE_SHIFT);
+    ntable = map_domain_page(_mfn(paddr_to_mfn(ntable_maddr)));
 
     /* get the first mfn of next level */
     first_mfn = amd_iommu_get_next_table_from_pte((u32*)ntable) >> PAGE_SHIFT;
@@ -400,7 +400,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
 
     ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );
 
-    table = map_domain_page(pt_mfn);
+    table = map_domain_page(_mfn(pt_mfn));
     pde = table + pfn_to_pde_idx(gfn, merge_level);
 
     /* get first mfn */
@@ -412,7 +412,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
         return 1;
     }
 
-    ntable = map_domain_page(ntable_mfn);
+    ntable = map_domain_page(_mfn(ntable_mfn));
     first_mfn = amd_iommu_get_next_table_from_pte((u32*)ntable) >> PAGE_SHIFT;
 
     if ( first_mfn == 0 )
@@ -467,7 +467,7 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
         unsigned int next_level = level - 1;
         pt_mfn[level] = next_table_mfn;
 
-        next_table_vaddr = map_domain_page(next_table_mfn);
+        next_table_vaddr = map_domain_page(_mfn(next_table_mfn));
         pde = next_table_vaddr + pfn_to_pde_idx(pfn, level);
 
         /* Here might be a super page frame */
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index 109234e..2b4169a 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -41,7 +41,7 @@ boolean_param("iommu_inclusive_mapping", iommu_inclusive_mapping);
 
 void *map_vtd_domain_page(u64 maddr)
 {
-    return map_domain_page(maddr >> PAGE_SHIFT_4K);
+    return map_domain_page(_mfn(paddr_to_mfn(maddr)));
 }
 
 void unmap_vtd_domain_page(void *va)
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 2e1f21a..6abb047 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -207,6 +207,8 @@ static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
 #define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
 #define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
 #define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
+#define mfn_to_paddr(mfn) ((paddr_t)(mfn) << PAGE_SHIFT)
+#define paddr_to_mfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
 #define vmap_to_mfn(va)     paddr_to_pfn(virt_to_maddr((vaddr_t)va))
 #define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
 
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index 7876527..ca590f3 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -37,7 +37,7 @@
 static inline void *
 hap_map_domain_page(mfn_t mfn)
 {
-    return map_domain_page(mfn_x(mfn));
+    return map_domain_page(mfn);
 }
 
 static inline void
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index e26daaf..4ed327a 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -172,9 +172,9 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
 #define l3e_to_l2e(x)              ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
 #define l4e_to_l3e(x)              ((l3_pgentry_t *)__va(l4e_get_paddr(x)))
 
-#define map_l1t_from_l2e(x)        ((l1_pgentry_t *)map_domain_page(l2e_get_pfn(x)))
-#define map_l2t_from_l3e(x)        ((l2_pgentry_t *)map_domain_page(l3e_get_pfn(x)))
-#define map_l3t_from_l4e(x)        ((l3_pgentry_t *)map_domain_page(l4e_get_pfn(x)))
+#define map_l1t_from_l2e(x)        ((l1_pgentry_t *)map_domain_page(_mfn(l2e_get_pfn(x))))
+#define map_l2t_from_l3e(x)        ((l2_pgentry_t *)map_domain_page(_mfn(l3e_get_pfn(x))))
+#define map_l3t_from_l4e(x)        ((l3_pgentry_t *)map_domain_page(_mfn(l4e_get_pfn(x))))
 
 /* Given a virtual address, get an entry offset into a page table. */
 #define l1_table_offset(a)         \
@@ -233,6 +233,8 @@ void copy_page_sse2(void *, const void *);
 /* Convert between frame number and address formats.  */
 #define __pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
 #define __paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
+#define __mfn_to_paddr(mfn) ((paddr_t)(mfn) << PAGE_SHIFT)
+#define __paddr_to_mfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
 
 /* Convert between machine frame numbers and spage-info structures. */
 #define __mfn_to_spage(mfn)  (spage_table + pfn_to_sdx(mfn))
@@ -262,6 +264,8 @@ void copy_page_sse2(void *, const void *);
 #define pfn_to_paddr(pfn)   __pfn_to_paddr(pfn)
 #define paddr_to_pfn(pa)    __paddr_to_pfn(pa)
 #define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
+#define mfn_to_paddr(mfn)   __mfn_to_paddr(mfn)
+#define paddr_to_mfn(pa)    __paddr_to_mfn(pa)
 #define vmap_to_mfn(va)     l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va)))
 #define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
 
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 9c32665..7a09881 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -376,7 +376,7 @@ guest_map_l1e(struct vcpu *v, unsigned long addr, unsigned long *gl1mfn)
          != _PAGE_PRESENT )
         return NULL;
     *gl1mfn = l2e_get_pfn(l2e);
-    return (l1_pgentry_t *)map_domain_page(*gl1mfn) + l1_table_offset(addr);
+    return (l1_pgentry_t *)map_domain_page(_mfn(*gl1mfn)) + l1_table_offset(addr);
 }
 
 /* Pull down the mapping we got from guest_map_l1e() */
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index 41f365c..f680a53 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -23,7 +23,7 @@ void copy_domain_page(mfn_t dst, const mfn_t src);
  * Map a given page frame, returning the mapped virtual address. The page is
  * then accessible within the current VCPU until a corresponding unmap call.
  */
-void *map_domain_page(unsigned long mfn);
+void *map_domain_page(mfn_t mfn);
 
 /*
  * Pass a VA within a page previously mapped in the context of the
@@ -44,7 +44,7 @@ unsigned long domain_page_map_to_mfn(const void *va);
 void *map_domain_page_global(mfn_t mfn);
 void unmap_domain_page_global(const void *va);
 
-#define __map_domain_page(pg)        map_domain_page(__page_to_mfn(pg))
+#define __map_domain_page(pg)        map_domain_page(_mfn(__page_to_mfn(pg)))
 
 static inline void *__map_domain_page_global(struct page_info *pg)
 {
@@ -84,7 +84,7 @@ map_domain_page_with_cache(unsigned long mfn, struct domain_mmap_cache *cache)
     }
 
     cache->mfn   = mfn;
-    cache->va    = map_domain_page(mfn);
+    cache->va    = map_domain_page(_mfn(mfn));
     cache->flags = DMCACHE_ENTRY_HELD | DMCACHE_ENTRY_VALID;
 
  done:
@@ -113,7 +113,7 @@ domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
 
 #else /* !CONFIG_DOMAIN_PAGE */
 
-#define map_domain_page(mfn)                mfn_to_virt(mfn)
+#define map_domain_page(mfn)                mfn_to_virt(mfn_x(mfn))
 #define __map_domain_page(pg)               page_to_virt(pg)
 #define unmap_domain_page(va)               ((void)(va))
 #define domain_page_map_to_mfn(va)          virt_to_mfn((unsigned long)(va))
-- 
2.1.4


* Re: [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t
  2015-07-02 12:04 [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t Ben Catterall
  2015-07-02 12:04 ` [PATCH v2 2/3] xen/domain_page: Convert copy/clear_domain_page() " Ben Catterall
  2015-07-02 12:04 ` [PATCH v2 3/3] Convert map_domain_page() to use the new mfn_t type Ben Catterall
@ 2015-07-07 10:01 ` Jan Beulich
  2015-07-07 10:07   ` Andrew Cooper
  2015-07-07 15:12 ` Ian Campbell
  3 siblings, 1 reply; 15+ messages in thread
From: Jan Beulich @ 2015-07-07 10:01 UTC (permalink / raw)
  To: andrew.cooper3, Ben Catterall
  Cc: xen-devel, keir, stefano.stabellini, ian.campbell, tim

>>> On 02.07.15 at 14:04, <Ben.Catterall@citrix.com> wrote:
> --- a/xen/include/xen/domain_page.h
> +++ b/xen/include/xen/domain_page.h
> @@ -41,11 +41,15 @@ unsigned long domain_page_map_to_mfn(const void *va);
>   * address spaces (not just within the VCPU that created the mapping). Global
>   * mappings can also be unmapped from any context.
>   */
> -void *map_domain_page_global(unsigned long mfn);
> +void *map_domain_page_global(mfn_t mfn);
>  void unmap_domain_page_global(const void *va);
>  
>  #define __map_domain_page(pg)        map_domain_page(__page_to_mfn(pg))
> -#define __map_domain_page_global(pg) map_domain_page_global(__page_to_mfn(pg))
> +
> +static inline void *__map_domain_page_global(struct page_info *pg)

const
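
For illustration, the constified form being asked for would look roughly like
this (the body is an assumption based on the rest of the series, not quoted
from the posted hunk):

    static inline void *__map_domain_page_global(const struct page_info *pg)
    {
        return map_domain_page_global(_mfn(__page_to_mfn(pg)));
    }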

> @@ -117,9 +121,17 @@ domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
>                                                        mfn_to_virt(smfn))
>  #define domain_page_map_to_mfn(va)          virt_to_mfn((unsigned long)(va))
>  
> -#define map_domain_page_global(mfn)         mfn_to_virt(mfn)
> -#define __map_domain_page_global(pg)        page_to_virt(pg)
> -#define unmap_domain_page_global(va)        ((void)(va))
> +static inline void *map_domain_page_global(mfn_t mfn)
> +{
> +    return mfn_to_virt(mfn_x(mfn));
> +}
> +
> +static inline void *__map_domain_page_global(struct page_info *pg)

const

> +{
> +    return page_to_virt(pg);
> +}
> +
> +static inline void unmap_domain_page_global(void *va) {};

And again (the more that the real function already has it that way).

With these adjusted
Reviewed-by: Jan Beulich <jbeulich@suse.com>

Jan


* Re: [PATCH v2 2/3] xen/domain_page: Convert copy/clear_domain_page() to using mfn_t
  2015-07-02 12:04 ` [PATCH v2 2/3] xen/domain_page: Convert copy/clear_domain_page() " Ben Catterall
@ 2015-07-07 10:03   ` Jan Beulich
  0 siblings, 0 replies; 15+ messages in thread
From: Jan Beulich @ 2015-07-07 10:03 UTC (permalink / raw)
  To: Ben Catterall
  Cc: keir, ian.campbell, andrew.cooper3, tim, stefano.stabellini, xen-devel

>>> On 02.07.15 at 14:04, <Ben.Catterall@citrix.com> wrote:
> From: Andrew Cooper <andrew.cooper3@citrix.com>
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> [Convert grant_table.c to pass mfn_t types and fix ARM compiling]
> 
> Signed-off-by: Ben Catterall <Ben.Catterall@citrix.com>
> Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

Acked-by: Jan Beulich <jbeulich@suse.com>


* Re: [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t
  2015-07-07 10:01 ` [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t Jan Beulich
@ 2015-07-07 10:07   ` Andrew Cooper
  2015-07-07 10:50     ` Jan Beulich
  0 siblings, 1 reply; 15+ messages in thread
From: Andrew Cooper @ 2015-07-07 10:07 UTC (permalink / raw)
  To: Jan Beulich, Ben Catterall
  Cc: xen-devel, keir, stefano.stabellini, ian.campbell, tim

On 07/07/15 11:01, Jan Beulich wrote:
>>>> On 02.07.15 at 14:04, <Ben.Catterall@citrix.com> wrote:
>> --- a/xen/include/xen/domain_page.h
>> +++ b/xen/include/xen/domain_page.h
>> @@ -41,11 +41,15 @@ unsigned long domain_page_map_to_mfn(const void *va);
>>   * address spaces (not just within the VCPU that created the mapping). Global
>>   * mappings can also be unmapped from any context.
>>   */
>> -void *map_domain_page_global(unsigned long mfn);
>> +void *map_domain_page_global(mfn_t mfn);
>>  void unmap_domain_page_global(const void *va);
>>  
>>  #define __map_domain_page(pg)        map_domain_page(__page_to_mfn(pg))
>> -#define __map_domain_page_global(pg) map_domain_page_global(__page_to_mfn(pg))
>> +
>> +static inline void *__map_domain_page_global(struct page_info *pg)
> const
>
>> @@ -117,9 +121,17 @@ domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
>>                                                        mfn_to_virt(smfn))
>>  #define domain_page_map_to_mfn(va)          virt_to_mfn((unsigned long)(va))
>>  
>> -#define map_domain_page_global(mfn)         mfn_to_virt(mfn)
>> -#define __map_domain_page_global(pg)        page_to_virt(pg)
>> -#define unmap_domain_page_global(va)        ((void)(va))
>> +static inline void *map_domain_page_global(mfn_t mfn)
>> +{
>> +    return mfn_to_virt(mfn_x(mfn));
>> +}
>> +
>> +static inline void *__map_domain_page_global(struct page_info *pg)
> const
>
>> +{
>> +    return page_to_virt(pg);
>> +}
>> +
>> +static inline void unmap_domain_page_global(void *va) {};
> And again (the more that the real function already has it that way).

Hmm.  Both unmap_domain_page() and _global() should be updated to not
take a const void *va.

Just like free(), these functions are not performing a read-only
operation on the destination pointer, therefore must not be performed on
an actual const pointer.

~Andrew


* Re: [PATCH v2 3/3] Convert map_domain_page() to use the new mfn_t type
  2015-07-02 12:04 ` [PATCH v2 3/3] Convert map_domain_page() to use the new mfn_t type Ben Catterall
@ 2015-07-07 10:10   ` Jan Beulich
  2015-07-09 10:29     ` Ben Catterall
  0 siblings, 1 reply; 15+ messages in thread
From: Jan Beulich @ 2015-07-07 10:10 UTC (permalink / raw)
  To: Ben Catterall
  Cc: keir, ian.campbell, andrew.cooper3, tim, stefano.stabellini, xen-devel

>>> On 02.07.15 at 14:04, <Ben.Catterall@citrix.com> wrote:
> Reworked the internals and declaration, applying (un)boxing
> where needed. Converted calls to map_domain_page() to
> provide mfn_t types, boxing where needed.
> 
> Signed-off-by: Ben Catterall <Ben.Catterall@citrix.com>
> Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
> 
> ---
> Changed since v1:
>    * Created paddr_to_mfn() and mfn_to_paddr() for both x86 and ARM
>    * Converted code to use the new paddr_to_mfn() rather than e.g.
>      paddr>>PAGE_SHIFT

This was a bogus change - why can't you use paddr_to_pfn() and
pfn_to_paddr()? And if you needed new macros, they should be
named consistently, i.e. maddr_to_mfn() and mfn_to_maddr().
And perhaps they should then produce/take mfn_t?
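
A minimal sketch of what such consistently named, mfn_t-typed helpers could
look like (hypothetical; nothing of the sort is in the posted patch):

    static inline paddr_t mfn_to_maddr(mfn_t mfn)
    {
        return (paddr_t)mfn_x(mfn) << PAGE_SHIFT;
    }

    static inline mfn_t maddr_to_mfn(paddr_t ma)
    {
        return _mfn(ma >> PAGE_SHIFT);
    }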

> @@ -194,7 +194,7 @@ static void update_pagetable_mac(vmac_ctx_t *ctx)
>          {
>              if ( page->count_info & PGC_page_table )
>              {
> -                void *pg = map_domain_page(mfn);
> +                void *pg = map_domain_page(_mfn(mfn));
>                  vmac_update(pg, PAGE_SIZE, ctx);

Please take the opportunity and add the missing blank line in cases
like this.

Jan


* Re: [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t
  2015-07-07 10:07   ` Andrew Cooper
@ 2015-07-07 10:50     ` Jan Beulich
  2015-07-13 16:56       ` Ian Jackson
  0 siblings, 1 reply; 15+ messages in thread
From: Jan Beulich @ 2015-07-07 10:50 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: keir, ian.campbell, tim, stefano.stabellini, xen-devel, Ben Catterall

>>> On 07.07.15 at 12:07, <andrew.cooper3@citrix.com> wrote:
> On 07/07/15 11:01, Jan Beulich wrote:
>>>>> On 02.07.15 at 14:04, <Ben.Catterall@citrix.com> wrote:
>>> --- a/xen/include/xen/domain_page.h
>>> +++ b/xen/include/xen/domain_page.h
>>> @@ -41,11 +41,15 @@ unsigned long domain_page_map_to_mfn(const void *va);
>>>   * address spaces (not just within the VCPU that created the mapping). Global
>>>   * mappings can also be unmapped from any context.
>>>   */
>>> -void *map_domain_page_global(unsigned long mfn);
>>> +void *map_domain_page_global(mfn_t mfn);
>>>  void unmap_domain_page_global(const void *va);
>>>  
>>>  #define __map_domain_page(pg)        map_domain_page(__page_to_mfn(pg))
>>> -#define __map_domain_page_global(pg) map_domain_page_global(__page_to_mfn(pg))
>>> +
>>> +static inline void *__map_domain_page_global(struct page_info *pg)
>> const
>>
>>> @@ -117,9 +121,17 @@ domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
>>>                                                        mfn_to_virt(smfn))
>>>  #define domain_page_map_to_mfn(va)          virt_to_mfn((unsigned long)(va))
>>>  
>>> -#define map_domain_page_global(mfn)         mfn_to_virt(mfn)
>>> -#define __map_domain_page_global(pg)        page_to_virt(pg)
>>> -#define unmap_domain_page_global(va)        ((void)(va))
>>> +static inline void *map_domain_page_global(mfn_t mfn)
>>> +{
>>> +    return mfn_to_virt(mfn_x(mfn));
>>> +}
>>> +
>>> +static inline void *__map_domain_page_global(struct page_info *pg)
>> const
>>
>>> +{
>>> +    return page_to_virt(pg);
>>> +}
>>> +
>>> +static inline void unmap_domain_page_global(void *va) {};
>> And again (the more that the real function already has it that way).
> 
> Hmm.  Both unmap_domain_page() and _global() should be updated to not
> take a const void *va.
> 
> Just like free(), these functions are not performing a read-only
> operation on the destination pointer, therefore must not be performed on
> an actual const pointer.

I disagree - from the caller's perspective they don't modify the data
being pointed to (that data is simply becoming undefined). An entity
allocating memory, initializing it, and never modifying it again should
be allowed to store the pointer in a variable pointing to a const
modified type, and free/unmap it without needing any casts. I.e. in
fact xfree() and free_xenheap_pages() should have their parameters
constified.
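
The calling pattern being argued for, as a sketch (struct foo is a stand-in,
and xmalloc(type) is Xen's type-based allocation macro):

    struct foo *f = xmalloc(struct foo);
    /* ... initialise *f once, then treat it as read-only ... */
    const struct foo *cfg = f;

    xfree(cfg);    /* no cast needed if xfree() takes a const void * */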

Jan


* Re: [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t
  2015-07-02 12:04 [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t Ben Catterall
                   ` (2 preceding siblings ...)
  2015-07-07 10:01 ` [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t Jan Beulich
@ 2015-07-07 15:12 ` Ian Campbell
  3 siblings, 0 replies; 15+ messages in thread
From: Ian Campbell @ 2015-07-07 15:12 UTC (permalink / raw)
  To: Ben Catterall
  Cc: keir, andrew.cooper3, tim, stefano.stabellini, jbeulich, xen-devel

On Thu, 2015-07-02 at 13:04 +0100, Ben Catterall wrote:
> From: Andrew Cooper <andrew.cooper3@citrix.com>
> 
> The sh_map/unmap wrappers can be dropped, and take the opportunity to turn
> some #define's into static inlines, for added type safety.
> 
> As part of adding the type safety, GCC highlights a problematic include cycle
> with arm/mm.h including domain_page.h which includes xen/mm.h and falls over
> __page_to_mfn being used before being declared.  Simply dropping the inclusion
> of domain_page.h fixes the compilation issue.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> CC: Jan Beulich <JBeulich@suse.com>
> CC: Tim Deegan <tim@xen.org>

For ARM:

Acked-by: Ian Campbell <ian.campbell@citrix.com>


* Re: [PATCH v2 3/3] Convert map_domain_page() to use the new mfn_t type
  2015-07-07 10:10   ` Jan Beulich
@ 2015-07-09 10:29     ` Ben Catterall
  2015-07-09 10:38       ` Jan Beulich
  0 siblings, 1 reply; 15+ messages in thread
From: Ben Catterall @ 2015-07-09 10:29 UTC (permalink / raw)
  To: Jan Beulich
  Cc: keir, ian.campbell, andrew.cooper3, tim, stefano.stabellini, xen-devel



On 07/07/15 11:10, Jan Beulich wrote:
>>>> On 02.07.15 at 14:04, <Ben.Catterall@citrix.com> wrote:
>> Reworked the internals and declaration, applying (un)boxing
>> where needed. Converted calls to map_domain_page() to
>> provide mfn_t types, boxing where needed.
>>
>> Signed-off-by: Ben Catterall <Ben.Catterall@citrix.com>
>> Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
>>
>> ---
>> Changed since v1:
>>     * Created paddr_to_mfn() and mfn_to_paddr() for both x86 and ARM
>>     * Converted code to use the new paddr_to_mfn() rather than e.g.
>>       paddr>>PAGE_SHIFT
> This was a bogus change - why can't you use paddr_to_pfn() and
> pfn_to_paddr()? And if you needed new macros, they should be
> named consistently, i.e. maddr_to_mfn() and mfn_to_maddr().
> And perhaps they should then produce/take mfn_t?
>
In [PATCH 3/3] Andrew said that I should use _mfn(paddr_to_mfn(ma)) rather than
ma >> PAGE_SHIFT, so I made the change based on that. Can you clarify if I should
proceed and use paddr_to_pfn() instead?

Re. the rename: can you clarify what the difference between maddr (machine addr?) and
paddr (physical addr?) is, please?

Thanks!

>> @@ -194,7 +194,7 @@ static void update_pagetable_mac(vmac_ctx_t *ctx)
>>           {
>>               if ( page->count_info & PGC_page_table )
>>               {
>> -                void *pg = map_domain_page(mfn);
>> +                void *pg = map_domain_page(_mfn(mfn));
>>                   vmac_update(pg, PAGE_SIZE, ctx);
> Please take the opportunity and add the missing blank line in cases
> like this.
>
> Jan
>


* Re: [PATCH v2 3/3] Convert map_domain_page() to use the new mfn_t type
  2015-07-09 10:29     ` Ben Catterall
@ 2015-07-09 10:38       ` Jan Beulich
  0 siblings, 0 replies; 15+ messages in thread
From: Jan Beulich @ 2015-07-09 10:38 UTC (permalink / raw)
  To: Ben Catterall
  Cc: keir, ian.campbell, andrew.cooper3, tim, stefano.stabellini, xen-devel

>>> On 09.07.15 at 12:29, <Ben.Catterall@citrix.com> wrote:

> 
> On 07/07/15 11:10, Jan Beulich wrote:
>>>>> On 02.07.15 at 14:04, <Ben.Catterall@citrix.com> wrote:
>>> Reworked the internals and declaration, applying (un)boxing
>>> where needed. Converted calls to map_domain_page() to
>>> provide mfn_t types, boxing where needed.
>>>
>>> Signed-off-by: Ben Catterall <Ben.Catterall@citrix.com>
>>> Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
>>>
>>> ---
>>> Changed since v1:
>>>     * Created paddr_to_mfn() and mfn_to_paddr() for both x86 and ARM
>>>     * Converted code to use the new paddr_to_mfn() rather than e.g.
>>>       paddr>>PAGE_SHIFT
>> This was a bogus change - why can't you use paddr_to_pfn() and
>> pfn_to_paddr()? And if you needed new macros, they should be
>> named consistently, i.e. maddr_to_mfn() and mfn_to_maddr().
>> And perhaps they should then produce/take mfn_t?
>>
> In [PATCH 3/3] Andrew said that I should use _mfn(paddr_to_mfn(ma)) rather than
> ma >> PAGE_SHIFT, so I made the change based on that. Can you clarify if I should
> proceed and use paddr_to_pfn() instead?

Yes, I'd prefer not to add new macros when we have existing ones
that fit our needs. And in no case would I accept ones with
inconsistent names.

> Re. the rename: can you clarify what the difference between maddr (machine
> addr?) and paddr (physical addr?) is, please?

From only the hypervisor's perspective there's none. When dealing with
PV guests, physical and machine addresses are different. For the
frame number <-> address conversion, though, the precise kind of
address doesn't matter at all, since the page size is universal.
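
The conversion macros added by the patch itself illustrate this; the pfn and
mfn variants are textually identical:

    #define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
    #define mfn_to_paddr(mfn) ((paddr_t)(mfn) << PAGE_SHIFT)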

Jan


* Re: [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t
  2015-07-07 10:50     ` Jan Beulich
@ 2015-07-13 16:56       ` Ian Jackson
  2015-07-14  9:56         ` Jan Beulich
  0 siblings, 1 reply; 15+ messages in thread
From: Ian Jackson @ 2015-07-13 16:56 UTC (permalink / raw)
  To: Jan Beulich
  Cc: keir, ian.campbell, Andrew Cooper, tim, stefano.stabellini,
	Ben Catterall, xen-devel

Jan Beulich writes ("Re: [Xen-devel] [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t"):
> On 07.07.15 at 12:07, <andrew.cooper3@citrix.com> wrote:
> > Just like free(), these functions are not performing a read-only
> > operation on the destination pointer, therefore must not be performed on
> > an actual const pointer.
> 
> I disagree - from the caller's perspective they don't modify the data
> being pointed to (that data is simply becoming undefined). An entity
> allocating memory, initializing it, and never modifying it again should
> be allowed to store the pointer in a variable pointing to a const
> modified type, and free/unmap it without needing any casts. I.e. in
> fact xfree() and free_xenheap_pages() should have their parameters
> constified.

Surely xfree() ought to have the same prototype as free() ?

free is declared in C99 (7.20.3.2 in my copy of TC2) as:

    void free(void *ptr);

That is, non-const.  AFAIAA this is not generally regarded as a bug.

I think the same reasoning and conclusion ought to apply to xfree, and
other freeing functions.

Ian.


* Re: [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t
  2015-07-13 16:56       ` Ian Jackson
@ 2015-07-14  9:56         ` Jan Beulich
  2015-07-14 10:54           ` Ian Jackson
  0 siblings, 1 reply; 15+ messages in thread
From: Jan Beulich @ 2015-07-14  9:56 UTC (permalink / raw)
  To: Ian Jackson
  Cc: keir, ian.campbell, Andrew Cooper, tim, stefano.stabellini,
	Ben Catterall, xen-devel

>>> On 13.07.15 at 18:56, <Ian.Jackson@eu.citrix.com> wrote:
> Jan Beulich writes ("Re: [Xen-devel] [PATCH v2 1/3] xen/domain_page: Convert 
> map_domain_page_global() to using mfn_t"):
>> On 07.07.15 at 12:07, <andrew.cooper3@citrix.com> wrote:
>> > Just like free(), these functions are not performing a read-only
>> > operation on the destination pointer, therefore must not be performed on
>> > an actual const pointer.
>> 
>> I disagree - from the caller's perspective they don't modify the data
>> being pointed to (that data is simply becoming undefined). An entity
>> allocating memory, initializing it, and never modifying it again should
>> be allowed to store the pointer in a variable pointing to a const
>> modified type, and free/unmap it without needing any casts. I.e. in
>> fact xfree() and free_xenheap_pages() should have their parameters
>> constified.
> 
> Surely xfree() ought to have the same prototype as free() ?

Why? If it were to be a full match, why wouldn't we call it free() in
the first place?

Note also that Linux has

void kfree(const void *);
void kzfree(const void *);

(with even the krealloc() flavors matching that model).

> free is declared in C99 (7.20.3.2 in my copy of TC2) as:
> 
>     void free(void *ptr);
> 
> That is, non-const.  AFAIAA this is not generally regarded as a bug.

Perhaps, but certainly depending on who looks at it. I for my part
dislike (as hinted at above) that I have to cast away const-ness in
order to be able to call free() without causing compiler warnings,
and I generally hide this in wrappers to limit such casting.
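
Such a wrapper can be as small as the following sketch (shown against the
standard allocator):

    #include <stdlib.h>

    /* Confine the necessary const-cast to one place. */
    static inline void free_const(const void *p)
    {
        free((void *)p);
    }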

Jan


* Re: [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t
  2015-07-14  9:56         ` Jan Beulich
@ 2015-07-14 10:54           ` Ian Jackson
  2015-07-14 11:38             ` Jan Beulich
  0 siblings, 1 reply; 15+ messages in thread
From: Ian Jackson @ 2015-07-14 10:54 UTC (permalink / raw)
  To: Jan Beulich
  Cc: keir, ian.campbell, Andrew Cooper, tim, stefano.stabellini,
	Ben Catterall, xen-devel

Jan Beulich writes ("Re: [Xen-devel] [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t"):
> On 13.07.15 at 18:56, <Ian.Jackson@eu.citrix.com> wrote:
> > Surely xfree() ought to have the same prototype as free() ?
> 
> Why? If it were to be a full match, why wouldn't we call it free() in
> the first place?

Is that what is supposed to differ between free and xfree ?  That
would be a bit odd.

In the userland world, xfree is the companion to xmalloc:

  http://manpages.debian.org/cgi-bin/man.cgi?query=xfree&apropos=0&sektion=0&manpath=Debian+8+jessie&format=html&locale=en

(Although, logically speaking, xfree is unnecessary in that set.)

> Note also that Linux has
> 
> void kfree(const void *);
> void kzfree(const void *);
> 
> (with even the krealloc() flavors matching that model).

How odd.

> > free is declared in C99 (7.20.3.2 in my copy of TC2) as:
> > 
> >     void free(void *ptr);
> > 
> > That is, non-const.  AFAIAA this is not generally regarded as a bug.
> 
> Perhaps, but certainly depending on who looks at it. I for my part
> dislike (as hinted at above) that I have to cast away const-ness in
> order to be able to call free() without causing compiler warnings,
> and I generally hide this in wrappers to limit such casting.

The flipside is that if free can take a const*, functions which take a
const struct foo* can free it.  That's not what I would expect such a
function to do.
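
For instance (struct foo again a stand-in):

    /* Nothing in this signature suggests *c is modified or released... */
    static void report(const struct foo *c)
    {
        xfree(c);   /* ...yet with a const-taking xfree() this compiles cleanly */
    }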


Do we need to resolve this disagreement now ?

Ian.


* Re: [PATCH v2 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t
  2015-07-14 10:54           ` Ian Jackson
@ 2015-07-14 11:38             ` Jan Beulich
  0 siblings, 0 replies; 15+ messages in thread
From: Jan Beulich @ 2015-07-14 11:38 UTC (permalink / raw)
  To: Ian Jackson
  Cc: keir, ian.campbell, Andrew Cooper, tim, stefano.stabellini,
	Ben Catterall, xen-devel

>>> On 14.07.15 at 12:54, <Ian.Jackson@eu.citrix.com> wrote:
> Jan Beulich writes ("Re: [Xen-devel] [PATCH v2 1/3] xen/domain_page: Convert 
> map_domain_page_global() to using mfn_t"):
>> On 13.07.15 at 18:56, <Ian.Jackson@eu.citrix.com> wrote:
>> > Surely xfree() ought to have the same prototype as free() ?
>> 
>> Why? If it were to be a full match, why wouldn't we call it free() in
>> the first place?
> 
> Is that what is supposed to differ between free and xfree ?  That
> would be a bit odd.
> 
> In the userland world, xfree is the companion to xmalloc:
> 
>   http://manpages.debian.org/cgi-bin/man.cgi?query=xfree&apropos=0&sektion=0&manpath=Debian+8+jessie&format=html&locale=en
> 
> (Although, logically speaking, xfree is unnecessary in that set.)

And I don't view the hypervisor's xmalloc() as a counterpart to the
user-mode one, but as a counterpart of Linux's kmalloc().

>> > free is declared in C99 (7.20.3.2 in my copy of TC2) as:
>> > 
>> >     void free(void *ptr);
>> > 
>> > That is, non-const.  AFAIAA this is not generally regarded as a bug.
>> 
>> Perhaps, but certainly depending on who looks at it. I for my part
>> dislike (as hinted at above) that I have to cast away const-ness in
>> order to be able to call free() without causing compiler warnings,
>> and I generally hide this in wrappers to limit such casting.
> 
> The flipside is that if free can take a const*, functions which take a
> const struct foo* can free it.  That's not what I would expect such a
> function to do.

Depends.

> Do we need to resolve this disagreement now ?

As long as no patch is pending to adjust xfree(), I don't think so.
And of course unless you request adjustments to what the patch here
(which has already gone in) did.

Jan

