* [PATCH] p2m: split mem_access into separate files
@ 2016-12-08 22:57 Tamas K Lengyel
  2016-12-09  8:35 ` Razvan Cojocaru
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Tamas K Lengyel @ 2016-12-08 22:57 UTC
  To: xen-devel
  Cc: Stefano Stabellini, Razvan Cojocaru, George Dunlap,
	Tamas K Lengyel, Julien Grall, Jan Beulich, Andrew Cooper

This patch relocates the mem_access components that are currently mixed in
with the p2m code into separate files. This better aligns the code with
similar subsystems, such as mem_sharing and mem_paging, which already live in
separate files. No functional changes are introduced; the patch is mechanical
code movement.

On ARM we also relocate the static inline gfn_next_boundary function to p2m.h,
as the mem_access code needs access to it.
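
For reference, the helper is moved unmodified; as it reads today in
xen/arch/arm/p2m.c:

    /*
     * Return the start of the next mapping based on the order of the
     * current one.
     */
    static inline gfn_t gfn_next_boundary(gfn_t gfn, unsigned int order)
    {
        /*
         * The order corresponds to the order of the mapping (or invalid
         * range) in the page table. So we need to align the GFN before
         * incrementing.
         */
        gfn = _gfn(gfn_x(gfn) & ~((1UL << order) - 1));

        return gfn_add(gfn, 1UL << order);
    }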

Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
---
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Julien Grall <julien.grall@arm.com>
Cc: Razvan Cojocaru <rcojocaru@bitdefender.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <george.dunlap@eu.citrix.com>
---
 MAINTAINERS                      |   2 +
 xen/arch/arm/Makefile            |   1 +
 xen/arch/arm/mem_access.c        | 477 +++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/p2m.c               | 461 +------------------------------------
 xen/arch/arm/traps.c             |   1 +
 xen/arch/x86/hvm/hvm.c           |   1 +
 xen/arch/x86/mm/Makefile         |   1 +
 xen/arch/x86/mm/mem_access.c     | 463 +++++++++++++++++++++++++++++++++++++
 xen/arch/x86/mm/p2m.c            | 421 ----------------------------------
 xen/arch/x86/vm_event.c          |   1 +
 xen/common/mem_access.c          |   3 +-
 xen/common/vm_event.c            |   1 +
 xen/include/asm-arm/mem_access.h |  60 +++++
 xen/include/asm-arm/p2m.h        |  31 ++-
 xen/include/asm-x86/mem_access.h |  61 +++++
 xen/include/asm-x86/p2m.h        |  24 +-
 xen/include/xen/mem_access.h     |  64 +++++-
 xen/include/xen/p2m-common.h     |  52 -----
 18 files changed, 1142 insertions(+), 983 deletions(-)
 create mode 100644 xen/arch/arm/mem_access.c
 create mode 100644 xen/arch/x86/mm/mem_access.c
 create mode 100644 xen/include/asm-arm/mem_access.h
 create mode 100644 xen/include/asm-x86/mem_access.h

diff --git a/MAINTAINERS b/MAINTAINERS
index f0d0202..fb26be3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -402,6 +402,8 @@ S:	Supported
 F:	tools/tests/xen-access
 F:	xen/arch/*/monitor.c
 F:	xen/arch/*/vm_event.c
+F:	xen/arch/arm/mem_access.c
+F:	xen/arch/x86/mm/mem_access.c
 F:	xen/arch/x86/hvm/monitor.c
 F:	xen/common/mem_access.c
 F:	xen/common/monitor.c
diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
index da39d39..b095e8a 100644
--- a/xen/arch/arm/Makefile
+++ b/xen/arch/arm/Makefile
@@ -24,6 +24,7 @@ obj-y += io.o
 obj-y += irq.o
 obj-y += kernel.o
 obj-$(CONFIG_LIVEPATCH) += livepatch.o
+obj-y += mem_access.o
 obj-y += mm.o
 obj-y += monitor.o
 obj-y += p2m.o
diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
new file mode 100644
index 0000000..a96dc10
--- /dev/null
+++ b/xen/arch/arm/mem_access.c
@@ -0,0 +1,477 @@
+/*
+ * arch/arm/mem_access.c
+ *
+ * Architecture-specific mem_access handling routines
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/config.h>
+#include <xen/sched.h>
+#include <xen/vm_event.h>
+#include <xen/monitor.h>
+#include <public/vm_event.h>
+#include <asm/event.h>
+
+static int __p2m_get_mem_access(struct domain *d, gfn_t gfn,
+                                xenmem_access_t *access)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    void *i;
+    unsigned int index;
+
+    static const xenmem_access_t memaccess[] = {
+#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
+            ACCESS(n),
+            ACCESS(r),
+            ACCESS(w),
+            ACCESS(rw),
+            ACCESS(x),
+            ACCESS(rx),
+            ACCESS(wx),
+            ACCESS(rwx),
+            ACCESS(rx2rw),
+            ACCESS(n2rwx),
+#undef ACCESS
+    };
+
+    ASSERT(p2m_is_locked(p2m));
+
+    /* If no setting was ever set, just return rwx. */
+    if ( !p2m->mem_access_enabled )
+    {
+        *access = XENMEM_access_rwx;
+        return 0;
+    }
+
+    /* If request to get default access. */
+    if ( gfn_eq(gfn, INVALID_GFN) )
+    {
+        *access = memaccess[p2m->default_access];
+        return 0;
+    }
+
+    i = radix_tree_lookup(&p2m->mem_access_settings, gfn_x(gfn));
+
+    if ( !i )
+    {
+        /*
+         * No setting was found in the Radix tree. Check if the
+         * entry exists in the page-tables.
+         */
+        mfn_t mfn = p2m_get_entry(p2m, gfn, NULL, NULL, NULL);
+
+        if ( mfn_eq(mfn, INVALID_MFN) )
+            return -ESRCH;
+
+        /* If an entry exists then it's rwx. */
+        *access = XENMEM_access_rwx;
+    }
+    else
+    {
+        /* Setting was found in the Radix tree. */
+        index = radix_tree_ptr_to_int(i);
+        if ( index >= ARRAY_SIZE(memaccess) )
+            return -ERANGE;
+
+        *access = memaccess[index];
+    }
+
+    return 0;
+}
+
+/*
+ * Look up the mem_access setting for a domain's GFN in the radix tree;
+ * a missing entry means the access is unrestricted (rwx). The entry
+ * associated with the GFN is assumed to be valid.
+ */
+p2m_access_t p2m_mem_access_radix_get(struct p2m_domain *p2m, gfn_t gfn)
+{
+    void *ptr;
+
+    if ( !p2m->mem_access_enabled )
+        return p2m->default_access;
+
+    ptr = radix_tree_lookup(&p2m->mem_access_settings, gfn_x(gfn));
+    if ( !ptr )
+        return p2m_access_rwx;
+    else
+        return radix_tree_ptr_to_int(ptr);
+}
+
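+/*
+ * Record the mem_access setting for a GFN in the radix tree. Setting
+ * p2m_access_rwx removes any existing entry, as lookups treat a missing
+ * entry as rwx.
+ */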
+int p2m_mem_access_radix_set(struct p2m_domain *p2m, gfn_t gfn,
+                             p2m_access_t a)
+{
+    int rc;
+
+    if ( !p2m->mem_access_enabled )
+        return 0;
+
+    if ( p2m_access_rwx == a )
+    {
+        radix_tree_delete(&p2m->mem_access_settings, gfn_x(gfn));
+        return 0;
+    }
+
+    rc = radix_tree_insert(&p2m->mem_access_settings, gfn_x(gfn),
+                           radix_tree_int_to_ptr(a));
+    if ( rc == -EEXIST )
+    {
+        /* If a setting already exists, change it to the new one */
+        radix_tree_replace_slot(
+            radix_tree_lookup_slot(
+                &p2m->mem_access_settings, gfn_x(gfn)),
+            radix_tree_int_to_ptr(a));
+        rc = 0;
+    }
+
+    return rc;
+}
+
+/*
+ * If mem_access is in use it might have been the reason why get_page_from_gva
+ * failed to fetch the page, as it uses the MMU for the permission checking.
+ * Only in these cases do we do a software-based type check, and fetch the page
+ * if we indeed found a conflicting mem_access setting.
+ */
+struct page_info*
+p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag)
+{
+    long rc;
+    paddr_t ipa;
+    gfn_t gfn;
+    mfn_t mfn;
+    xenmem_access_t xma;
+    p2m_type_t t;
+    struct page_info *page = NULL;
+    struct p2m_domain *p2m = &current->domain->arch.p2m;
+
+    rc = gva_to_ipa(gva, &ipa, flag);
+    if ( rc < 0 )
+        goto err;
+
+    gfn = _gfn(paddr_to_pfn(ipa));
+
+    /*
+     * We do this first as this is faster in the default case when no
+     * permission is set on the page.
+     */
+    rc = __p2m_get_mem_access(current->domain, gfn, &xma);
+    if ( rc < 0 )
+        goto err;
+
+    /* Let's check if mem_access limited the access. */
+    switch ( xma )
+    {
+    default:
+    case XENMEM_access_rwx:
+    case XENMEM_access_rw:
+        /*
+         * If mem_access contains no rw perm restrictions at all then the original
+         * fault was correct.
+         */
+        goto err;
+    case XENMEM_access_n2rwx:
+    case XENMEM_access_n:
+    case XENMEM_access_x:
+        /*
+         * If no r/w is permitted by mem_access, this was a fault caused by mem_access.
+         */
+        break;
+    case XENMEM_access_wx:
+    case XENMEM_access_w:
+        /*
+         * If this was a read then it was because of mem_access, but if it was
+         * a write then the original get_page_from_gva fault was correct.
+         */
+        if ( flag == GV2M_READ )
+            break;
+        else
+            goto err;
+    case XENMEM_access_rx2rw:
+    case XENMEM_access_rx:
+    case XENMEM_access_r:
+        /*
+         * If this was a write then it was because of mem_access, but if it was
+         * a read then the original get_page_from_gva fault was correct.
+         */
+        if ( flag == GV2M_WRITE )
+            break;
+        else
+            goto err;
+    }
+
+    /*
+     * We had a mem_access permission limiting the access, but the page type
+     * could also be limiting, so we need to check that as well.
+     */
+    mfn = p2m_get_entry(p2m, gfn, &t, NULL, NULL);
+    if ( mfn_eq(mfn, INVALID_MFN) )
+        goto err;
+
+    if ( !mfn_valid(mfn_x(mfn)) )
+        goto err;
+
+    /*
+     * Base type doesn't allow r/w
+     */
+    if ( t != p2m_ram_rw )
+        goto err;
+
+    page = mfn_to_page(mfn_x(mfn));
+
+    if ( unlikely(!get_page(page, current->domain)) )
+        page = NULL;
+
+err:
+    return page;
+}
+
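+/*
+ * Check a stage-2 fault against the mem_access settings. Returns false if
+ * the fault was caused by mem_access and has been dealt with here (either
+ * by relaxing an rx2rw/n2rwx setting or by notifying the vm_event
+ * monitor); returns true if mem_access was not responsible for the fault.
+ */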
+bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
+{
+    int rc;
+    bool_t violation;
+    xenmem_access_t xma;
+    vm_event_request_t *req;
+    struct vcpu *v = current;
+    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
+
+    /* Mem_access is not in use. */
+    if ( !p2m->mem_access_enabled )
+        return true;
+
+    rc = p2m_get_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), &xma);
+    if ( rc )
+        return true;
+
+    /* Now check for mem_access violation. */
+    switch ( xma )
+    {
+    case XENMEM_access_rwx:
+        violation = false;
+        break;
+    case XENMEM_access_rw:
+        violation = npfec.insn_fetch;
+        break;
+    case XENMEM_access_wx:
+        violation = npfec.read_access;
+        break;
+    case XENMEM_access_rx:
+    case XENMEM_access_rx2rw:
+        violation = npfec.write_access;
+        break;
+    case XENMEM_access_x:
+        violation = npfec.read_access || npfec.write_access;
+        break;
+    case XENMEM_access_w:
+        violation = npfec.read_access || npfec.insn_fetch;
+        break;
+    case XENMEM_access_r:
+        violation = npfec.write_access || npfec.insn_fetch;
+        break;
+    default:
+    case XENMEM_access_n:
+    case XENMEM_access_n2rwx:
+        violation = true;
+        break;
+    }
+
+    if ( !violation )
+        return true;
+
+    /* First, handle rx2rw and n2rwx conversion automatically. */
+    if ( npfec.write_access && xma == XENMEM_access_rx2rw )
+    {
+        rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
+                                0, ~0, XENMEM_access_rw, 0);
+        return false;
+    }
+    else if ( xma == XENMEM_access_n2rwx )
+    {
+        rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
+                                0, ~0, XENMEM_access_rwx, 0);
+    }
+
+    /* Otherwise, check if there is a vm_event monitor subscriber */
+    if ( !vm_event_check_ring(&v->domain->vm_event->monitor) )
+    {
+        /* No listener */
+        if ( p2m->access_required )
+        {
+            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
+                                  "no vm_event listener VCPU %d, dom %d\n",
+                                  v->vcpu_id, v->domain->domain_id);
+            domain_crash(v->domain);
+        }
+        else
+        {
+            /* n2rwx was already handled */
+            if ( xma != XENMEM_access_n2rwx )
+            {
+                /* A listener is not required, so clear the access
+                 * restrictions. */
+                rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
+                                        0, ~0, XENMEM_access_rwx, 0);
+            }
+        }
+
+        /* No need to reinject */
+        return false;
+    }
+
+    req = xzalloc(vm_event_request_t);
+    if ( req )
+    {
+        req->reason = VM_EVENT_REASON_MEM_ACCESS;
+
+        /* Send request to mem access subscriber */
+        req->u.mem_access.gfn = gpa >> PAGE_SHIFT;
+        req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
+        if ( npfec.gla_valid )
+        {
+            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
+            req->u.mem_access.gla = gla;
+
+            if ( npfec.kind == npfec_kind_with_gla )
+                req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
+            else if ( npfec.kind == npfec_kind_in_gpt )
+                req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
+        }
+        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
+        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
+        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
+
+        if ( monitor_traps(v, (xma != XENMEM_access_n2rwx), req) < 0 )
+            domain_crash(v->domain);
+
+        xfree(req);
+    }
+
+    return false;
+}
+
+/*
+ * Set access type for a region of gfns.
+ * If gfn == INVALID_GFN, sets the default access type.
+ */
+long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
+                        uint32_t start, uint32_t mask, xenmem_access_t access,
+                        unsigned int altp2m_idx)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    p2m_access_t a;
+    unsigned int order;
+    long rc = 0;
+
+    static const p2m_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
+        ACCESS(n),
+        ACCESS(r),
+        ACCESS(w),
+        ACCESS(rw),
+        ACCESS(x),
+        ACCESS(rx),
+        ACCESS(wx),
+        ACCESS(rwx),
+        ACCESS(rx2rw),
+        ACCESS(n2rwx),
+#undef ACCESS
+    };
+
+    switch ( access )
+    {
+    case 0 ... ARRAY_SIZE(memaccess) - 1:
+        a = memaccess[access];
+        break;
+    case XENMEM_access_default:
+        a = p2m->default_access;
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    /*
+     * Flip mem_access_enabled to true when a permission is set, so as to
+     * prevent allocating or inserting super-pages.
+     */
+    p2m->mem_access_enabled = true;
+
+    /* If request to set default access. */
+    if ( gfn_eq(gfn, INVALID_GFN) )
+    {
+        p2m->default_access = a;
+        return 0;
+    }
+
+    p2m_write_lock(p2m);
+
+    for ( gfn = gfn_add(gfn, start); nr > start;
+          gfn = gfn_next_boundary(gfn, order) )
+    {
+        p2m_type_t t;
+        mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order);
+
+        if ( !mfn_eq(mfn, INVALID_MFN) )
+        {
+            order = 0;
+            rc = p2m_set_entry(p2m, gfn, 1, mfn, t, a);
+            if ( rc )
+                break;
+        }
+
+        start += gfn_x(gfn_next_boundary(gfn, order)) - gfn_x(gfn);
+        /* Check for continuation if it is not the last iteration */
+        if ( nr > start && !(start & mask) && hypercall_preempt_check() )
+        {
+            rc = start;
+            break;
+        }
+    }
+
+    p2m_write_unlock(p2m);
+
+    return rc;
+}
+
+long p2m_set_mem_access_multi(struct domain *d,
+                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
+                              const XEN_GUEST_HANDLE(const_uint8) access_list,
+                              uint32_t nr, uint32_t start, uint32_t mask,
+                              unsigned int altp2m_idx)
+{
+    /* Not yet implemented on ARM. */
+    return -EOPNOTSUPP;
+}
+
+int p2m_get_mem_access(struct domain *d, gfn_t gfn,
+                       xenmem_access_t *access)
+{
+    int ret;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+    p2m_read_lock(p2m);
+    ret = __p2m_get_mem_access(d, gfn, access);
+    p2m_read_unlock(p2m);
+
+    return ret;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index cc5634b..55f249b 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -13,6 +13,7 @@
 #include <asm/event.h>
 #include <asm/hardirq.h>
 #include <asm/page.h>
+#include <asm/mem_access.h>
 
 #ifdef CONFIG_ARM_64
 static unsigned int __read_mostly p2m_root_order;
@@ -58,22 +59,6 @@ static inline bool p2m_is_superpage(lpae_t pte, unsigned int level)
     return (level < 3) && p2m_mapping(pte);
 }
 
-/*
- * Return the start of the next mapping based on the order of the
- * current one.
- */
-static inline gfn_t gfn_next_boundary(gfn_t gfn, unsigned int order)
-{
-    /*
-     * The order corresponds to the order of the mapping (or invalid
-     * range) in the page table. So we need to align the GFN before
-     * incrementing.
-     */
-    gfn = _gfn(gfn_x(gfn) & ~((1UL << order) - 1));
-
-    return gfn_add(gfn, 1UL << order);
-}
-
 static void p2m_flush_tlb(struct p2m_domain *p2m);
 
 /* Unlock the flush and do a P2M TLB flush if necessary */
@@ -229,25 +214,6 @@ static lpae_t *p2m_get_root_pointer(struct p2m_domain *p2m,
     return __map_domain_page(p2m->root + root_table);
 }
 
-/*
- * Lookup the MFN corresponding to a domain's GFN.
- * Lookup mem access in the ratrix tree.
- * The entries associated to the GFN is considered valid.
- */
-static p2m_access_t p2m_mem_access_radix_get(struct p2m_domain *p2m, gfn_t gfn)
-{
-    void *ptr;
-
-    if ( !p2m->mem_access_enabled )
-        return p2m->default_access;
-
-    ptr = radix_tree_lookup(&p2m->mem_access_settings, gfn_x(gfn));
-    if ( !ptr )
-        return p2m_access_rwx;
-    else
-        return radix_tree_ptr_to_int(ptr);
-}
-
 #define GUEST_TABLE_MAP_FAILED 0
 #define GUEST_TABLE_SUPER_PAGE 1
 #define GUEST_TABLE_NORMAL_PAGE 2
@@ -602,102 +568,6 @@ static int p2m_create_table(struct p2m_domain *p2m, lpae_t *entry)
     return 0;
 }
 
-static int __p2m_get_mem_access(struct domain *d, gfn_t gfn,
-                                xenmem_access_t *access)
-{
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    void *i;
-    unsigned int index;
-
-    static const xenmem_access_t memaccess[] = {
-#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
-            ACCESS(n),
-            ACCESS(r),
-            ACCESS(w),
-            ACCESS(rw),
-            ACCESS(x),
-            ACCESS(rx),
-            ACCESS(wx),
-            ACCESS(rwx),
-            ACCESS(rx2rw),
-            ACCESS(n2rwx),
-#undef ACCESS
-    };
-
-    ASSERT(p2m_is_locked(p2m));
-
-    /* If no setting was ever set, just return rwx. */
-    if ( !p2m->mem_access_enabled )
-    {
-        *access = XENMEM_access_rwx;
-        return 0;
-    }
-
-    /* If request to get default access. */
-    if ( gfn_eq(gfn, INVALID_GFN) )
-    {
-        *access = memaccess[p2m->default_access];
-        return 0;
-    }
-
-    i = radix_tree_lookup(&p2m->mem_access_settings, gfn_x(gfn));
-
-    if ( !i )
-    {
-        /*
-         * No setting was found in the Radix tree. Check if the
-         * entry exists in the page-tables.
-         */
-        mfn_t mfn = p2m_get_entry(p2m, gfn, NULL, NULL, NULL);
-
-        if ( mfn_eq(mfn, INVALID_MFN) )
-            return -ESRCH;
-
-        /* If entry exists then its rwx. */
-        *access = XENMEM_access_rwx;
-    }
-    else
-    {
-        /* Setting was found in the Radix tree. */
-        index = radix_tree_ptr_to_int(i);
-        if ( index >= ARRAY_SIZE(memaccess) )
-            return -ERANGE;
-
-        *access = memaccess[index];
-    }
-
-    return 0;
-}
-
-static int p2m_mem_access_radix_set(struct p2m_domain *p2m, gfn_t gfn,
-                                    p2m_access_t a)
-{
-    int rc;
-
-    if ( !p2m->mem_access_enabled )
-        return 0;
-
-    if ( p2m_access_rwx == a )
-    {
-        radix_tree_delete(&p2m->mem_access_settings, gfn_x(gfn));
-        return 0;
-    }
-
-    rc = radix_tree_insert(&p2m->mem_access_settings, gfn_x(gfn),
-                           radix_tree_int_to_ptr(a));
-    if ( rc == -EEXIST )
-    {
-        /* If a setting already exists, change it to the new one */
-        radix_tree_replace_slot(
-            radix_tree_lookup_slot(
-                &p2m->mem_access_settings, gfn_x(gfn)),
-            radix_tree_int_to_ptr(a));
-        rc = 0;
-    }
-
-    return rc;
-}
-
 enum p2m_operation {
     MEMACCESS,
 };
@@ -1454,105 +1324,6 @@ mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
     return p2m_lookup(d, gfn, NULL);
 }
 
-/*
- * If mem_access is in use it might have been the reason why get_page_from_gva
- * failed to fetch the page, as it uses the MMU for the permission checking.
- * Only in these cases we do a software-based type check and fetch the page if
- * we indeed found a conflicting mem_access setting.
- */
-static struct page_info*
-p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag)
-{
-    long rc;
-    paddr_t ipa;
-    gfn_t gfn;
-    mfn_t mfn;
-    xenmem_access_t xma;
-    p2m_type_t t;
-    struct page_info *page = NULL;
-    struct p2m_domain *p2m = &current->domain->arch.p2m;
-
-    rc = gva_to_ipa(gva, &ipa, flag);
-    if ( rc < 0 )
-        goto err;
-
-    gfn = _gfn(paddr_to_pfn(ipa));
-
-    /*
-     * We do this first as this is faster in the default case when no
-     * permission is set on the page.
-     */
-    rc = __p2m_get_mem_access(current->domain, gfn, &xma);
-    if ( rc < 0 )
-        goto err;
-
-    /* Let's check if mem_access limited the access. */
-    switch ( xma )
-    {
-    default:
-    case XENMEM_access_rwx:
-    case XENMEM_access_rw:
-        /*
-         * If mem_access contains no rw perm restrictions at all then the original
-         * fault was correct.
-         */
-        goto err;
-    case XENMEM_access_n2rwx:
-    case XENMEM_access_n:
-    case XENMEM_access_x:
-        /*
-         * If no r/w is permitted by mem_access, this was a fault caused by mem_access.
-         */
-        break;
-    case XENMEM_access_wx:
-    case XENMEM_access_w:
-        /*
-         * If this was a read then it was because of mem_access, but if it was
-         * a write then the original get_page_from_gva fault was correct.
-         */
-        if ( flag == GV2M_READ )
-            break;
-        else
-            goto err;
-    case XENMEM_access_rx2rw:
-    case XENMEM_access_rx:
-    case XENMEM_access_r:
-        /*
-         * If this was a write then it was because of mem_access, but if it was
-         * a read then the original get_page_from_gva fault was correct.
-         */
-        if ( flag == GV2M_WRITE )
-            break;
-        else
-            goto err;
-    }
-
-    /*
-     * We had a mem_access permission limiting the access, but the page type
-     * could also be limiting, so we need to check that as well.
-     */
-    mfn = p2m_get_entry(p2m, gfn, &t, NULL, NULL);
-    if ( mfn_eq(mfn, INVALID_MFN) )
-        goto err;
-
-    if ( !mfn_valid(mfn_x(mfn)) )
-        goto err;
-
-    /*
-     * Base type doesn't allow r/w
-     */
-    if ( t != p2m_ram_rw )
-        goto err;
-
-    page = mfn_to_page(mfn_x(mfn));
-
-    if ( unlikely(!get_page(page, current->domain)) )
-        page = NULL;
-
-err:
-    return page;
-}
-
 struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va,
                                     unsigned long flags)
 {
@@ -1665,236 +1436,6 @@ void __init setup_virt_paging(void)
     smp_call_function(setup_virt_paging_one, (void *)val, 1);
 }
 
-bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
-{
-    int rc;
-    bool_t violation;
-    xenmem_access_t xma;
-    vm_event_request_t *req;
-    struct vcpu *v = current;
-    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
-
-    /* Mem_access is not in use. */
-    if ( !p2m->mem_access_enabled )
-        return true;
-
-    rc = p2m_get_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), &xma);
-    if ( rc )
-        return true;
-
-    /* Now check for mem_access violation. */
-    switch ( xma )
-    {
-    case XENMEM_access_rwx:
-        violation = false;
-        break;
-    case XENMEM_access_rw:
-        violation = npfec.insn_fetch;
-        break;
-    case XENMEM_access_wx:
-        violation = npfec.read_access;
-        break;
-    case XENMEM_access_rx:
-    case XENMEM_access_rx2rw:
-        violation = npfec.write_access;
-        break;
-    case XENMEM_access_x:
-        violation = npfec.read_access || npfec.write_access;
-        break;
-    case XENMEM_access_w:
-        violation = npfec.read_access || npfec.insn_fetch;
-        break;
-    case XENMEM_access_r:
-        violation = npfec.write_access || npfec.insn_fetch;
-        break;
-    default:
-    case XENMEM_access_n:
-    case XENMEM_access_n2rwx:
-        violation = true;
-        break;
-    }
-
-    if ( !violation )
-        return true;
-
-    /* First, handle rx2rw and n2rwx conversion automatically. */
-    if ( npfec.write_access && xma == XENMEM_access_rx2rw )
-    {
-        rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
-                                0, ~0, XENMEM_access_rw, 0);
-        return false;
-    }
-    else if ( xma == XENMEM_access_n2rwx )
-    {
-        rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
-                                0, ~0, XENMEM_access_rwx, 0);
-    }
-
-    /* Otherwise, check if there is a vm_event monitor subscriber */
-    if ( !vm_event_check_ring(&v->domain->vm_event->monitor) )
-    {
-        /* No listener */
-        if ( p2m->access_required )
-        {
-            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
-                                  "no vm_event listener VCPU %d, dom %d\n",
-                                  v->vcpu_id, v->domain->domain_id);
-            domain_crash(v->domain);
-        }
-        else
-        {
-            /* n2rwx was already handled */
-            if ( xma != XENMEM_access_n2rwx )
-            {
-                /* A listener is not required, so clear the access
-                 * restrictions. */
-                rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
-                                        0, ~0, XENMEM_access_rwx, 0);
-            }
-        }
-
-        /* No need to reinject */
-        return false;
-    }
-
-    req = xzalloc(vm_event_request_t);
-    if ( req )
-    {
-        req->reason = VM_EVENT_REASON_MEM_ACCESS;
-
-        /* Send request to mem access subscriber */
-        req->u.mem_access.gfn = gpa >> PAGE_SHIFT;
-        req->u.mem_access.offset =  gpa & ((1 << PAGE_SHIFT) - 1);
-        if ( npfec.gla_valid )
-        {
-            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
-            req->u.mem_access.gla = gla;
-
-            if ( npfec.kind == npfec_kind_with_gla )
-                req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
-            else if ( npfec.kind == npfec_kind_in_gpt )
-                req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
-        }
-        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
-        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
-        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
-
-        if ( monitor_traps(v, (xma != XENMEM_access_n2rwx), req) < 0 )
-            domain_crash(v->domain);
-
-        xfree(req);
-    }
-
-    return false;
-}
-
-/*
- * Set access type for a region of pfns.
- * If gfn == INVALID_GFN, sets the default access type.
- */
-long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
-                        uint32_t start, uint32_t mask, xenmem_access_t access,
-                        unsigned int altp2m_idx)
-{
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    p2m_access_t a;
-    unsigned int order;
-    long rc = 0;
-
-    static const p2m_access_t memaccess[] = {
-#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
-        ACCESS(n),
-        ACCESS(r),
-        ACCESS(w),
-        ACCESS(rw),
-        ACCESS(x),
-        ACCESS(rx),
-        ACCESS(wx),
-        ACCESS(rwx),
-        ACCESS(rx2rw),
-        ACCESS(n2rwx),
-#undef ACCESS
-    };
-
-    switch ( access )
-    {
-    case 0 ... ARRAY_SIZE(memaccess) - 1:
-        a = memaccess[access];
-        break;
-    case XENMEM_access_default:
-        a = p2m->default_access;
-        break;
-    default:
-        return -EINVAL;
-    }
-
-    /*
-     * Flip mem_access_enabled to true when a permission is set, as to prevent
-     * allocating or inserting super-pages.
-     */
-    p2m->mem_access_enabled = true;
-
-    /* If request to set default access. */
-    if ( gfn_eq(gfn, INVALID_GFN) )
-    {
-        p2m->default_access = a;
-        return 0;
-    }
-
-    p2m_write_lock(p2m);
-
-    for ( gfn = gfn_add(gfn, start); nr > start;
-          gfn = gfn_next_boundary(gfn, order) )
-    {
-        p2m_type_t t;
-        mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order);
-
-
-        if ( !mfn_eq(mfn, INVALID_MFN) )
-        {
-            order = 0;
-            rc = __p2m_set_entry(p2m, gfn, 0, mfn, t, a);
-            if ( rc )
-                break;
-        }
-
-        start += gfn_x(gfn_next_boundary(gfn, order)) - gfn_x(gfn);
-        /* Check for continuation if it is not the last iteration */
-        if ( nr > start && !(start & mask) && hypercall_preempt_check() )
-        {
-            rc = start;
-            break;
-        }
-    }
-
-    p2m_write_unlock(p2m);
-
-    return rc;
-}
-
-long p2m_set_mem_access_multi(struct domain *d,
-                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
-                              const XEN_GUEST_HANDLE(const_uint8) access_list,
-                              uint32_t nr, uint32_t start, uint32_t mask,
-                              unsigned int altp2m_idx)
-{
-    /* Not yet implemented on ARM. */
-    return -EOPNOTSUPP;
-}
-
-int p2m_get_mem_access(struct domain *d, gfn_t gfn,
-                       xenmem_access_t *access)
-{
-    int ret;
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
-
-    p2m_read_lock(p2m);
-    ret = __p2m_get_mem_access(d, gfn, access);
-    p2m_read_unlock(p2m);
-
-    return ret;
-}
-
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 8ff73fe..2da8cb4 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -43,6 +43,7 @@
 #include <asm/cpufeature.h>
 #include <asm/flushtlb.h>
 #include <asm/monitor.h>
+#include <asm/mem_access.h>
 
 #include "decode.h"
 #include "vtimer.h"
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e0f936b..a0ee5ba 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -73,6 +73,7 @@
 #include <public/vm_event.h>
 #include <public/arch-x86/cpuid.h>
 #include <asm/cpuid.h>
+#include <asm/mem_access.h>
 
 bool_t __read_mostly hvm_enabled;
 
diff --git a/xen/arch/x86/mm/Makefile b/xen/arch/x86/mm/Makefile
index 9804c3a..e977dd8 100644
--- a/xen/arch/x86/mm/Makefile
+++ b/xen/arch/x86/mm/Makefile
@@ -9,6 +9,7 @@ obj-y += guest_walk_3.o
 obj-y += guest_walk_4.o
 obj-y += mem_paging.o
 obj-y += mem_sharing.o
+obj-y += mem_access.o
 
 guest_walk_%.o: guest_walk.c Makefile
 	$(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
new file mode 100644
index 0000000..49af195
--- /dev/null
+++ b/xen/arch/x86/mm/mem_access.c
@@ -0,0 +1,463 @@
+/******************************************************************************
+ * arch/x86/mm/mem_access.c
+ *
+ * Parts of this code are Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
+ * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
+ * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
+ * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
+ * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/guest_access.h> /* copy_from_guest() */
+#include <xen/mem_access.h>
+#include <xen/vm_event.h>
+#include <xen/event.h>
+#include <public/vm_event.h>
+#include <asm/p2m.h>
+#include <asm/altp2m.h>
+#include <asm/vm_event.h>
+#include <asm/mem_access.h>
+
+#include "mm-locks.h"
+
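+/*
+ * Check whether the access reported in a vm_event response still violates
+ * the current mem_access settings on that GFN, i.e. whether emulation of
+ * the faulting instruction is still required.
+ */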
+bool p2m_mem_access_emulate_check(struct vcpu *v,
+                                  const vm_event_response_t *rsp)
+{
+    xenmem_access_t access;
+    bool violation = 1;
+    const struct vm_event_mem_access *data = &rsp->u.mem_access;
+
+    if ( p2m_get_mem_access(v->domain, _gfn(data->gfn), &access) == 0 )
+    {
+        switch ( access )
+        {
+        case XENMEM_access_n:
+        case XENMEM_access_n2rwx:
+        default:
+            violation = data->flags & MEM_ACCESS_RWX;
+            break;
+
+        case XENMEM_access_r:
+            violation = data->flags & MEM_ACCESS_WX;
+            break;
+
+        case XENMEM_access_w:
+            violation = data->flags & MEM_ACCESS_RX;
+            break;
+
+        case XENMEM_access_x:
+            violation = data->flags & MEM_ACCESS_RW;
+            break;
+
+        case XENMEM_access_rx:
+        case XENMEM_access_rx2rw:
+            violation = data->flags & MEM_ACCESS_W;
+            break;
+
+        case XENMEM_access_wx:
+            violation = data->flags & MEM_ACCESS_R;
+            break;
+
+        case XENMEM_access_rw:
+            violation = data->flags & MEM_ACCESS_X;
+            break;
+
+        case XENMEM_access_rwx:
+            violation = 0;
+            break;
+        }
+    }
+
+    return violation;
+}
+
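+/*
+ * Check a nested page fault against the mem_access settings: rx2rw and
+ * n2rwx conversions are handled automatically, otherwise a request is
+ * prepared for the vm_event monitor, if one is listening. Returns whether
+ * the vCPU must be paused for a synchronous event.
+ */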
+bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
+                            struct npfec npfec,
+                            vm_event_request_t **req_ptr)
+{
+    struct vcpu *v = current;
+    unsigned long gfn = gpa >> PAGE_SHIFT;
+    struct domain *d = v->domain;
+    struct p2m_domain *p2m = NULL;
+    mfn_t mfn;
+    p2m_type_t p2mt;
+    p2m_access_t p2ma;
+    vm_event_request_t *req;
+    int rc;
+
+    if ( altp2m_active(d) )
+        p2m = p2m_get_altp2m(v);
+    if ( !p2m )
+        p2m = p2m_get_hostp2m(d);
+
+    /* First, handle rx2rw conversion automatically.
+     * These calls to p2m->set_entry() must succeed: we have the gfn
+     * locked and just did a successful get_entry(). */
+    gfn_lock(p2m, gfn, 0);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);
+
+    if ( npfec.write_access && p2ma == p2m_access_rx2rw )
+    {
+        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw, -1);
+        ASSERT(rc == 0);
+        gfn_unlock(p2m, gfn, 0);
+        return 1;
+    }
+    else if ( p2ma == p2m_access_n2rwx )
+    {
+        ASSERT(npfec.write_access || npfec.read_access || npfec.insn_fetch);
+        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
+                            p2mt, p2m_access_rwx, -1);
+        ASSERT(rc == 0);
+    }
+    gfn_unlock(p2m, gfn, 0);
+
+    /* Otherwise, check if there is a memory event listener, and send the message along */
+    if ( !vm_event_check_ring(&d->vm_event->monitor) || !req_ptr )
+    {
+        /* No listener */
+        if ( p2m->access_required )
+        {
+            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
+                                  "no vm_event listener VCPU %d, dom %d\n",
+                                  v->vcpu_id, d->domain_id);
+            domain_crash(v->domain);
+            return 0;
+        }
+        else
+        {
+            gfn_lock(p2m, gfn, 0);
+            mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);
+            if ( p2ma != p2m_access_n2rwx )
+            {
+                /* A listener is not required, so clear the access
+                 * restrictions.  This set must succeed: we have the
+                 * gfn locked and just did a successful get_entry(). */
+                rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
+                                    p2mt, p2m_access_rwx, -1);
+                ASSERT(rc == 0);
+            }
+            gfn_unlock(p2m, gfn, 0);
+            return 1;
+        }
+    }
+
+    *req_ptr = NULL;
+    req = xzalloc(vm_event_request_t);
+    if ( req )
+    {
+        *req_ptr = req;
+
+        req->reason = VM_EVENT_REASON_MEM_ACCESS;
+        req->u.mem_access.gfn = gfn;
+        req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
+        if ( npfec.gla_valid )
+        {
+            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
+            req->u.mem_access.gla = gla;
+
+            if ( npfec.kind == npfec_kind_with_gla )
+                req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
+            else if ( npfec.kind == npfec_kind_in_gpt )
+                req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
+        }
+        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
+        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
+        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
+    }
+
+    /* Return whether vCPU pause is required (aka. sync event) */
+    return (p2ma != p2m_access_n2rwx);
+}
+
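+/*
+ * Set the access type on a GFN in an altp2m view. If the view holds no
+ * valid entry for the GFN yet, the mapping (including a covering
+ * superpage, if any) is first copied in from the host p2m.
+ */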
+int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
+                              struct p2m_domain *ap2m, p2m_access_t a,
+                              gfn_t gfn)
+{
+    mfn_t mfn;
+    p2m_type_t t;
+    p2m_access_t old_a;
+    unsigned int page_order;
+    unsigned long gfn_l = gfn_x(gfn);
+    int rc;
+
+    mfn = ap2m->get_entry(ap2m, gfn_l, &t, &old_a, 0, NULL, NULL);
+
+    /* Check host p2m if no valid entry in alternate */
+    if ( !mfn_valid(mfn_x(mfn)) )
+    {
+        mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
+                                    P2M_ALLOC | P2M_UNSHARE, &page_order, 0);
+
+        rc = -ESRCH;
+        if ( !mfn_valid(mfn_x(mfn)) || t != p2m_ram_rw )
+            return rc;
+
+        /* If this is a superpage, copy that first */
+        if ( page_order != PAGE_ORDER_4K )
+        {
+            unsigned long mask = ~((1UL << page_order) - 1);
+            unsigned long gfn2_l = gfn_l & mask;
+            mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);
+
+            rc = ap2m->set_entry(ap2m, gfn2_l, mfn2, page_order, t, old_a, 1);
+            if ( rc )
+                return rc;
+        }
+    }
+
+    return ap2m->set_entry(ap2m, gfn_l, mfn, PAGE_ORDER_4K, t, a,
+                         (current->domain != d));
+}
+
+static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
+                          struct p2m_domain *ap2m, p2m_access_t a,
+                          gfn_t gfn)
+{
+    int rc = 0;
+
+    if ( ap2m )
+    {
+        rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn);
+        /* If the corresponding mfn is invalid we will want to just skip it */
+        if ( rc == -ESRCH )
+            rc = 0;
+    }
+    else
+    {
+        mfn_t mfn;
+        p2m_access_t _a;
+        p2m_type_t t;
+        unsigned long gfn_l = gfn_x(gfn);
+
+        mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
+        rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
+    }
+
+    return rc;
+}
+
+static bool xenmem_access_to_p2m_access(struct p2m_domain *p2m,
+                                        xenmem_access_t xaccess,
+                                        p2m_access_t *paccess)
+{
+    static const p2m_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
+        ACCESS(n),
+        ACCESS(r),
+        ACCESS(w),
+        ACCESS(rw),
+        ACCESS(x),
+        ACCESS(rx),
+        ACCESS(wx),
+        ACCESS(rwx),
+        ACCESS(rx2rw),
+        ACCESS(n2rwx),
+#undef ACCESS
+    };
+
+    switch ( xaccess )
+    {
+    case 0 ... ARRAY_SIZE(memaccess) - 1:
+        *paccess = memaccess[xaccess];
+        break;
+    case XENMEM_access_default:
+        *paccess = p2m->default_access;
+        break;
+    default:
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Set access type for a region of gfns.
+ * If gfn == INVALID_GFN, sets the default access type.
+ */
+long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
+                        uint32_t start, uint32_t mask, xenmem_access_t access,
+                        unsigned int altp2m_idx)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
+    p2m_access_t a;
+    unsigned long gfn_l;
+    long rc = 0;
+
+    /* altp2m view 0 is treated as the hostp2m */
+    if ( altp2m_idx )
+    {
+        if ( altp2m_idx >= MAX_ALTP2M ||
+             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
+            return -EINVAL;
+
+        ap2m = d->arch.altp2m_p2m[altp2m_idx];
+    }
+
+    if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
+        return -EINVAL;
+
+    /* If request to set default access. */
+    if ( gfn_eq(gfn, INVALID_GFN) )
+    {
+        p2m->default_access = a;
+        return 0;
+    }
+
+    p2m_lock(p2m);
+    if ( ap2m )
+        p2m_lock(ap2m);
+
+    for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
+    {
+        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
+
+        if ( rc )
+            break;
+
+        /* Check for continuation if it's not the last iteration. */
+        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
+        {
+            rc = start;
+            break;
+        }
+    }
+
+    if ( ap2m )
+        p2m_unlock(ap2m);
+    p2m_unlock(p2m);
+
+    return rc;
+}
+
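+/*
+ * Set access types for a list of gfns, each gfn paired with its own access
+ * type taken from the guest handles. Supports hypercall continuation via
+ * start/mask, like p2m_set_mem_access().
+ */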
+long p2m_set_mem_access_multi(struct domain *d,
+                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
+                              const XEN_GUEST_HANDLE(const_uint8) access_list,
+                              uint32_t nr, uint32_t start, uint32_t mask,
+                              unsigned int altp2m_idx)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
+    long rc = 0;
+
+    /* altp2m view 0 is treated as the hostp2m */
+    if ( altp2m_idx )
+    {
+        if ( altp2m_idx >= MAX_ALTP2M ||
+             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
+            return -EINVAL;
+
+        ap2m = d->arch.altp2m_p2m[altp2m_idx];
+    }
+
+    p2m_lock(p2m);
+    if ( ap2m )
+        p2m_lock(ap2m);
+
+    while ( start < nr )
+    {
+        p2m_access_t a;
+        uint8_t access;
+        uint64_t gfn_l;
+
+        if ( copy_from_guest_offset(&gfn_l, pfn_list, start, 1) ||
+             copy_from_guest_offset(&access, access_list, start, 1) )
+        {
+            rc = -EFAULT;
+            break;
+        }
+
+        if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
+        {
+            rc = -EINVAL;
+            break;
+        }
+
+        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
+
+        if ( rc )
+            break;
+
+        /* Check for continuation if it's not the last iteration. */
+        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
+        {
+            rc = start;
+            break;
+        }
+    }
+
+    if ( ap2m )
+        p2m_unlock(ap2m);
+    p2m_unlock(p2m);
+
+    return rc;
+}
+
+/*
+ * Get access type for a gfn.
+ * If gfn == INVALID_GFN, gets the default access type.
+ */
+int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    p2m_type_t t;
+    p2m_access_t a;
+    mfn_t mfn;
+
+    static const xenmem_access_t memaccess[] = {
+#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
+            ACCESS(n),
+            ACCESS(r),
+            ACCESS(w),
+            ACCESS(rw),
+            ACCESS(x),
+            ACCESS(rx),
+            ACCESS(wx),
+            ACCESS(rwx),
+            ACCESS(rx2rw),
+            ACCESS(n2rwx),
+#undef ACCESS
+    };
+
+    /* If request to get default access. */
+    if ( gfn_eq(gfn, INVALID_GFN) )
+    {
+        *access = memaccess[p2m->default_access];
+        return 0;
+    }
+
+    gfn_lock(p2m, gfn, 0);
+    mfn = p2m->get_entry(p2m, gfn_x(gfn), &t, &a, 0, NULL, NULL);
+    gfn_unlock(p2m, gfn, 0);
+
+    if ( mfn_eq(mfn, INVALID_MFN) )
+        return -ESRCH;
+
+    if ( (unsigned) a >= ARRAY_SIZE(memaccess) )
+        return -ERANGE;
+
+    *access = memaccess[a];
+    return 0;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 6a45185..6299d5a 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1589,433 +1589,12 @@ void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp)
     }
 }
 
-bool p2m_mem_access_emulate_check(struct vcpu *v,
-                                  const vm_event_response_t *rsp)
-{
-    xenmem_access_t access;
-    bool violation = 1;
-    const struct vm_event_mem_access *data = &rsp->u.mem_access;
-
-    if ( p2m_get_mem_access(v->domain, _gfn(data->gfn), &access) == 0 )
-    {
-        switch ( access )
-        {
-        case XENMEM_access_n:
-        case XENMEM_access_n2rwx:
-        default:
-            violation = data->flags & MEM_ACCESS_RWX;
-            break;
-
-        case XENMEM_access_r:
-            violation = data->flags & MEM_ACCESS_WX;
-            break;
-
-        case XENMEM_access_w:
-            violation = data->flags & MEM_ACCESS_RX;
-            break;
-
-        case XENMEM_access_x:
-            violation = data->flags & MEM_ACCESS_RW;
-            break;
-
-        case XENMEM_access_rx:
-        case XENMEM_access_rx2rw:
-            violation = data->flags & MEM_ACCESS_W;
-            break;
-
-        case XENMEM_access_wx:
-            violation = data->flags & MEM_ACCESS_R;
-            break;
-
-        case XENMEM_access_rw:
-            violation = data->flags & MEM_ACCESS_X;
-            break;
-
-        case XENMEM_access_rwx:
-            violation = 0;
-            break;
-        }
-    }
-
-    return violation;
-}
-
 void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
 {
     if ( altp2m_active(v->domain) )
         p2m_switch_vcpu_altp2m_by_id(v, idx);
 }
 
-bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
-                            struct npfec npfec,
-                            vm_event_request_t **req_ptr)
-{
-    struct vcpu *v = current;
-    unsigned long gfn = gpa >> PAGE_SHIFT;
-    struct domain *d = v->domain;    
-    struct p2m_domain *p2m = NULL;
-    mfn_t mfn;
-    p2m_type_t p2mt;
-    p2m_access_t p2ma;
-    vm_event_request_t *req;
-    int rc;
-
-    if ( altp2m_active(d) )
-        p2m = p2m_get_altp2m(v);
-    if ( !p2m )
-        p2m = p2m_get_hostp2m(d);
-
-    /* First, handle rx2rw conversion automatically.
-     * These calls to p2m->set_entry() must succeed: we have the gfn
-     * locked and just did a successful get_entry(). */
-    gfn_lock(p2m, gfn, 0);
-    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);
-
-    if ( npfec.write_access && p2ma == p2m_access_rx2rw ) 
-    {
-        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw, -1);
-        ASSERT(rc == 0);
-        gfn_unlock(p2m, gfn, 0);
-        return 1;
-    }
-    else if ( p2ma == p2m_access_n2rwx )
-    {
-        ASSERT(npfec.write_access || npfec.read_access || npfec.insn_fetch);
-        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
-                            p2mt, p2m_access_rwx, -1);
-        ASSERT(rc == 0);
-    }
-    gfn_unlock(p2m, gfn, 0);
-
-    /* Otherwise, check if there is a memory event listener, and send the message along */
-    if ( !vm_event_check_ring(&d->vm_event->monitor) || !req_ptr ) 
-    {
-        /* No listener */
-        if ( p2m->access_required ) 
-        {
-            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
-                                  "no vm_event listener VCPU %d, dom %d\n",
-                                  v->vcpu_id, d->domain_id);
-            domain_crash(v->domain);
-            return 0;
-        }
-        else
-        {
-            gfn_lock(p2m, gfn, 0);
-            mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);
-            if ( p2ma != p2m_access_n2rwx )
-            {
-                /* A listener is not required, so clear the access
-                 * restrictions.  This set must succeed: we have the
-                 * gfn locked and just did a successful get_entry(). */
-                rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
-                                    p2mt, p2m_access_rwx, -1);
-                ASSERT(rc == 0);
-            }
-            gfn_unlock(p2m, gfn, 0);
-            return 1;
-        }
-    }
-
-    *req_ptr = NULL;
-    req = xzalloc(vm_event_request_t);
-    if ( req )
-    {
-        *req_ptr = req;
-
-        req->reason = VM_EVENT_REASON_MEM_ACCESS;
-        req->u.mem_access.gfn = gfn;
-        req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
-        if ( npfec.gla_valid )
-        {
-            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
-            req->u.mem_access.gla = gla;
-
-            if ( npfec.kind == npfec_kind_with_gla )
-                req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
-            else if ( npfec.kind == npfec_kind_in_gpt )
-                req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
-        }
-        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
-        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
-        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
-    }
-
-    /* Return whether vCPU pause is required (aka. sync event) */
-    return (p2ma != p2m_access_n2rwx);
-}
-
-static inline
-int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
-                              struct p2m_domain *ap2m, p2m_access_t a,
-                              gfn_t gfn)
-{
-    mfn_t mfn;
-    p2m_type_t t;
-    p2m_access_t old_a;
-    unsigned int page_order;
-    unsigned long gfn_l = gfn_x(gfn);
-    int rc;
-
-    mfn = ap2m->get_entry(ap2m, gfn_l, &t, &old_a, 0, NULL, NULL);
-
-    /* Check host p2m if no valid entry in alternate */
-    if ( !mfn_valid(mfn) )
-    {
-
-        mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
-                                    P2M_ALLOC | P2M_UNSHARE, &page_order, 0);
-
-        rc = -ESRCH;
-        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
-            return rc;
-
-        /* If this is a superpage, copy that first */
-        if ( page_order != PAGE_ORDER_4K )
-        {
-            unsigned long mask = ~((1UL << page_order) - 1);
-            unsigned long gfn2_l = gfn_l & mask;
-            mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);
-
-            rc = ap2m->set_entry(ap2m, gfn2_l, mfn2, page_order, t, old_a, 1);
-            if ( rc )
-                return rc;
-        }
-    }
-
-    return ap2m->set_entry(ap2m, gfn_l, mfn, PAGE_ORDER_4K, t, a,
-                         (current->domain != d));
-}
-
-static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
-                          struct p2m_domain *ap2m, p2m_access_t a,
-                          gfn_t gfn)
-{
-    int rc = 0;
-
-    if ( ap2m )
-    {
-        rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn);
-        /* If the corresponding mfn is invalid we will want to just skip it */
-        if ( rc == -ESRCH )
-            rc = 0;
-    }
-    else
-    {
-        mfn_t mfn;
-        p2m_access_t _a;
-        p2m_type_t t;
-        unsigned long gfn_l = gfn_x(gfn);
-
-        mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
-        rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
-    }
-
-    return rc;
-}
-
-static bool xenmem_access_to_p2m_access(struct p2m_domain *p2m,
-                                        xenmem_access_t xaccess,
-                                        p2m_access_t *paccess)
-{
-    static const p2m_access_t memaccess[] = {
-#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
-        ACCESS(n),
-        ACCESS(r),
-        ACCESS(w),
-        ACCESS(rw),
-        ACCESS(x),
-        ACCESS(rx),
-        ACCESS(wx),
-        ACCESS(rwx),
-        ACCESS(rx2rw),
-        ACCESS(n2rwx),
-#undef ACCESS
-    };
-
-    switch ( xaccess )
-    {
-    case 0 ... ARRAY_SIZE(memaccess) - 1:
-        *paccess = memaccess[xaccess];
-        break;
-    case XENMEM_access_default:
-        *paccess = p2m->default_access;
-        break;
-    default:
-        return false;
-    }
-
-    return true;
-}
-
-/*
- * Set access type for a region of gfns.
- * If gfn == INVALID_GFN, sets the default access type.
- */
-long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
-                        uint32_t start, uint32_t mask, xenmem_access_t access,
-                        unsigned int altp2m_idx)
-{
-    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
-    p2m_access_t a;
-    unsigned long gfn_l;
-    long rc = 0;
-
-    /* altp2m view 0 is treated as the hostp2m */
-    if ( altp2m_idx )
-    {
-        if ( altp2m_idx >= MAX_ALTP2M ||
-             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
-            return -EINVAL;
-
-        ap2m = d->arch.altp2m_p2m[altp2m_idx];
-    }
-
-    if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
-        return -EINVAL;
-
-    /* If request to set default access. */
-    if ( gfn_eq(gfn, INVALID_GFN) )
-    {
-        p2m->default_access = a;
-        return 0;
-    }
-
-    p2m_lock(p2m);
-    if ( ap2m )
-        p2m_lock(ap2m);
-
-    for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
-    {
-        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
-
-        if ( rc )
-            break;
-
-        /* Check for continuation if it's not the last iteration. */
-        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
-        {
-            rc = start;
-            break;
-        }
-    }
-
-    if ( ap2m )
-        p2m_unlock(ap2m);
-    p2m_unlock(p2m);
-
-    return rc;
-}
-
-long p2m_set_mem_access_multi(struct domain *d,
-                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
-                              const XEN_GUEST_HANDLE(const_uint8) access_list,
-                              uint32_t nr, uint32_t start, uint32_t mask,
-                              unsigned int altp2m_idx)
-{
-    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
-    long rc = 0;
-
-    /* altp2m view 0 is treated as the hostp2m */
-    if ( altp2m_idx )
-    {
-        if ( altp2m_idx >= MAX_ALTP2M ||
-             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
-            return -EINVAL;
-
-        ap2m = d->arch.altp2m_p2m[altp2m_idx];
-    }
-
-    p2m_lock(p2m);
-    if ( ap2m )
-        p2m_lock(ap2m);
-
-    while ( start < nr )
-    {
-        p2m_access_t a;
-        uint8_t access;
-        uint64_t gfn_l;
-
-        if ( copy_from_guest_offset(&gfn_l, pfn_list, start, 1) ||
-             copy_from_guest_offset(&access, access_list, start, 1) )
-        {
-            rc = -EFAULT;
-            break;
-        }
-
-        if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
-        {
-            rc = -EINVAL;
-            break;
-        }
-
-        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
-
-        if ( rc )
-            break;
-
-        /* Check for continuation if it's not the last iteration. */
-        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
-        {
-            rc = start;
-            break;
-        }
-    }
-
-    if ( ap2m )
-        p2m_unlock(ap2m);
-    p2m_unlock(p2m);
-
-    return rc;
-}
-
-/*
- * Get access type for a gfn.
- * If gfn == INVALID_GFN, gets the default access type.
- */
-int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access)
-{
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    p2m_type_t t;
-    p2m_access_t a;
-    mfn_t mfn;
-
-    static const xenmem_access_t memaccess[] = {
-#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
-            ACCESS(n),
-            ACCESS(r),
-            ACCESS(w),
-            ACCESS(rw),
-            ACCESS(x),
-            ACCESS(rx),
-            ACCESS(wx),
-            ACCESS(rwx),
-            ACCESS(rx2rw),
-            ACCESS(n2rwx),
-#undef ACCESS
-    };
-
-    /* If request to get default access. */
-    if ( gfn_eq(gfn, INVALID_GFN) )
-    {
-        *access = memaccess[p2m->default_access];
-        return 0;
-    }
-
-    gfn_lock(p2m, gfn, 0);
-    mfn = p2m->get_entry(p2m, gfn_x(gfn), &t, &a, 0, NULL, NULL);
-    gfn_unlock(p2m, gfn, 0);
-
-    if ( mfn_eq(mfn, INVALID_MFN) )
-        return -ESRCH;
-    
-    if ( (unsigned) a >= ARRAY_SIZE(memaccess) )
-        return -ERANGE;
-
-    *access =  memaccess[a];
-    return 0;
-}
-
 static struct p2m_domain *
 p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m)
 {
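
A note on the block just removed (and re-added verbatim in
xen/arch/x86/mm/mem_access.c): both set-access functions are
preemptible. A schematic of that contract, with a hypothetical helper
standing in for the real per-gfn work:

    /* Sketch only: process_one_gfn() is made up; the start/mask/rc
     * convention mirrors p2m_set_mem_access() above. */
    static long preemptible_loop(uint32_t nr, uint32_t start, uint32_t mask)
    {
        long rc = 0;

        while ( start < nr )
        {
            rc = process_one_gfn(start);   /* negative errno on failure */
            if ( rc )
                break;

            /* Every (mask + 1) entries, offer a preemption point. */
            if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
            {
                rc = start;   /* positive rc == "resume from this index" */
                break;
            }
        }

        return rc;
    }

The memop wrapper in xen/common/mem_access.c turns a positive return
value into a hypercall continuation, so the guest-visible operation
resumes where it left off.
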
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index 1e88d67..152e1e8 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -19,6 +19,7 @@
  */
 
 #include <asm/p2m.h>
+#include <asm/mem_access.h>
 #include <asm/vm_event.h>
 
 /* Implicitly serialized by the domctl lock. */
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 565a320..8bad4b4 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -24,8 +24,9 @@
 #include <xen/guest_access.h>
 #include <xen/hypercall.h>
 #include <xen/vm_event.h>
+#include <xen/mem_access.h>
 #include <public/memory.h>
-#include <asm/p2m.h>
+#include <asm/mem_access.h>
 #include <xsm/xsm.h>
 
 int mem_access_memop(unsigned long cmd,
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 907ab40..8a22436 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -28,6 +28,7 @@
 #include <asm/p2m.h>
 #include <asm/monitor.h>
 #include <asm/vm_event.h>
+#include <asm/mem_access.h>
 #include <xsm/xsm.h>
 
 /* for public/io/ring.h macros */
diff --git a/xen/include/asm-arm/mem_access.h b/xen/include/asm-arm/mem_access.h
new file mode 100644
index 0000000..0f44bc1
--- /dev/null
+++ b/xen/include/asm-arm/mem_access.h
@@ -0,0 +1,60 @@
+/*
+ * mem_access.h: architecture specific mem_access handling routines
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _XEN_ARM_MEM_ACCESS_H
+#define _XEN_ARM_MEM_ACCESS_H
+
+#include <public/vm_event.h> /* for vm_event_response_t */
+#include <public/memory.h>
+
+static inline
+bool p2m_mem_access_emulate_check(struct vcpu *v,
+                                  const vm_event_response_t *rsp)
+{
+    /* Not supported on ARM. */
+    return 0;
+}
+
+/* vm_event and mem_access are supported on any ARM guest */
+static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
+{
+    return 1;
+}
+
+/*
+ * Send mem event based on the access. Boolean return value indicates if trap
+ * needs to be injected into guest.
+ */
+bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec);
+
+p2m_access_t p2m_mem_access_radix_get(struct p2m_domain *p2m, gfn_t gfn);
+
+int p2m_mem_access_radix_set(struct p2m_domain *p2m, gfn_t gfn,
+                             p2m_access_t a);
+
+struct page_info*
+p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag);
+
+#endif /* _XEN_ARM_MEM_ACCESS_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index fdb6b47..2b22e9a 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -4,6 +4,7 @@
 #include <xen/mm.h>
 #include <xen/radix-tree.h>
 #include <xen/rwlock.h>
+#include <xen/mem_access.h>
 #include <public/vm_event.h> /* for vm_event_response_t */
 #include <public/memory.h>
 #include <xen/p2m-common.h>
@@ -139,14 +140,6 @@ typedef enum {
                              p2m_to_mask(p2m_map_foreign)))
 
 static inline
-bool p2m_mem_access_emulate_check(struct vcpu *v,
-                                  const vm_event_response_t *rsp)
-{
-    /* Not supported on ARM. */
-    return 0;
-}
-
-static inline
 void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
 {
     /* Not supported on ARM. */
@@ -343,22 +336,26 @@ static inline int get_page_and_type(struct page_info *page,
 /* get host p2m table */
 #define p2m_get_hostp2m(d) (&(d)->arch.p2m)
 
-/* vm_event and mem_access are supported on any ARM guest */
-static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
-{
-    return 1;
-}
-
 static inline bool_t p2m_vm_event_sanity_check(struct domain *d)
 {
     return 1;
 }
 
 /*
- * Send mem event based on the access. Boolean return value indicates if trap
- * needs to be injected into guest.
+ * Return the start of the next mapping based on the order of the
+ * current one.
  */
-bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec);
+static inline gfn_t gfn_next_boundary(gfn_t gfn, unsigned int order)
+{
+    /*
+     * The order corresponds to the order of the mapping (or invalid
+     * range) in the page table. So we need to align the GFN before
+     * incrementing.
+     */
+    gfn = _gfn(gfn_x(gfn) & ~((1UL << order) - 1));
+
+    return gfn_add(gfn, 1UL << order);
+}
 
 #endif /* _XEN_P2M_H */
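
A quick worked example of the relocated gfn_next_boundary() (the values
are illustrative only):

    /* order = 9 corresponds to a 2MB mapping with 4KB pages. */
    gfn_t gfn  = _gfn(0x403);
    gfn_t next = gfn_next_boundary(gfn, 9);
    /* 0x403 & ~0x1ff = 0x400 (align down), then 0x400 + 0x200 = 0x600,
     * so next == _gfn(0x600): the first gfn past the current mapping. */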
 
diff --git a/xen/include/asm-x86/mem_access.h b/xen/include/asm-x86/mem_access.h
new file mode 100644
index 0000000..9f7b409
--- /dev/null
+++ b/xen/include/asm-x86/mem_access.h
@@ -0,0 +1,61 @@
+/******************************************************************************
+ * include/asm-x86/mem_access.h
+ *
+ * Memory access support.
+ *
+ * Copyright (c) 2011 GridCentric Inc. (Andres Lagar-Cavilla)
+ * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
+ * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
+ * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
+ * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_X86_MEM_ACCESS_H__
+#define __ASM_X86_MEM_ACCESS_H__
+
+/*
+ * Setup vm_event request based on the access (gla is -1ull if not available).
+ * Handles the rw2rx conversion. Boolean return value indicates if event type
+ * is synchronous (i.e. requires vCPU pause). If the req_ptr has been populated,
+ * then the caller should use monitor_traps to send the event on the MONITOR
+ * ring. Once having released get_gfn* locks caller must also xfree the
+ * request.
+ */
+bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
+                            struct npfec npfec,
+                            vm_event_request_t **req_ptr);
+
+/* Check for emulation and mark vcpu for skipping one instruction
+ * upon rescheduling if required. */
+bool p2m_mem_access_emulate_check(struct vcpu *v,
+                                  const vm_event_response_t *rsp);
+
+/* Sanity check for mem_access hardware support */
+static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
+{
+    return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d);
+}
+
+#endif /*__ASM_X86_MEM_ACCESS_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
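
To make the comment above concrete, a schematic caller of
p2m_mem_access_check() (loosely modeled on the hvm fault path; the
surrounding locking and the exact call site are assumptions):

    vm_event_request_t *req = NULL;
    bool_t sync = p2m_mem_access_check(gpa, gla, npfec, &req);

    /* ... release any get_gfn* locks taken around the check ... */

    if ( req )
    {
        monitor_traps(current, sync, req); /* send on the MONITOR ring */
        xfree(req);                        /* caller owns the request  */
    }
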
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 7035860..de456d0 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -28,6 +28,7 @@
 
 #include <xen/config.h>
 #include <xen/paging.h>
+#include <xen/mem_access.h>
 #include <xen/p2m-common.h>
 #include <asm/mem_sharing.h>
 #include <asm/page.h>    /* for pagetable_t */
@@ -663,29 +664,6 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer);
 /* Resume normal operation (in case a domain was paused) */
 void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp);
 
-/*
- * Setup vm_event request based on the access (gla is -1ull if not available).
- * Handles the rw2rx conversion. Boolean return value indicates if event type
- * is synchronous (i.e. requires vCPU pause). If the req_ptr has been populated,
- * then the caller should use monitor_traps to send the event on the MONITOR
- * ring. Once having released get_gfn* locks caller must also xfree the
- * request.
- */
-bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
-                            struct npfec npfec,
-                            vm_event_request_t **req_ptr);
-
-/* Check for emulation and mark vcpu for skipping one instruction
- * upon rescheduling if required. */
-bool p2m_mem_access_emulate_check(struct vcpu *v,
-                                  const vm_event_response_t *rsp);
-
-/* Sanity check for mem_access hardware support */
-static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
-{
-    return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d);
-}
-
 /* 
  * Internal functions, only called by other p2m code
  */
diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
index da36e07..27e5f5a 100644
--- a/xen/include/xen/mem_access.h
+++ b/xen/include/xen/mem_access.h
@@ -19,29 +19,75 @@
  * along with this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifndef _XEN_ASM_MEM_ACCESS_H
-#define _XEN_ASM_MEM_ACCESS_H
+#ifndef _XEN_MEM_ACCESS_H
+#define _XEN_MEM_ACCESS_H
 
 #include <public/memory.h>
-#include <asm/p2m.h>
+#include <public/vm_event.h>
 
-#ifdef CONFIG_HAS_MEM_ACCESS
+/*
+ * Additional access types, which are used to further restrict
+ * the permissions given by the p2m_type_t memory type.  Violations
+ * caused by p2m_access_t restrictions are sent to the vm_event
+ * interface.
+ *
+ * The access permissions are soft state: when any ambiguous change of page
+ * type or use occurs, or when pages are flushed, swapped, or at any other
+ * convenient time, the access permissions can get reset to the p2m_domain
+ * default.
+ */
+typedef enum {
+    /* Code uses bottom three bits with bitmask semantics */
+    p2m_access_n     = 0, /* No access allowed. */
+    p2m_access_r     = 1 << 0,
+    p2m_access_w     = 1 << 1,
+    p2m_access_x     = 1 << 2,
+    p2m_access_rw    = p2m_access_r | p2m_access_w,
+    p2m_access_rx    = p2m_access_r | p2m_access_x,
+    p2m_access_wx    = p2m_access_w | p2m_access_x,
+    p2m_access_rwx   = p2m_access_r | p2m_access_w | p2m_access_x,
+
+    p2m_access_rx2rw = 8, /* Special: page goes from RX to RW on write */
+    p2m_access_n2rwx = 9, /* Special: page goes from N to RWX on access, *
+                           * generates an event but does not pause the
+                           * vcpu */
+
+    /* NOTE: Assumed to be only 4 bits right now on x86. */
+} p2m_access_t;
+
+/*
+ * Set access type for a region of gfns.
+ * If gfn == INVALID_GFN, sets the default access type.
+ */
+long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
+                        uint32_t start, uint32_t mask, xenmem_access_t access,
+                        unsigned int altp2m_idx);
 
+long p2m_set_mem_access_multi(struct domain *d,
+                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
+                              const XEN_GUEST_HANDLE(const_uint8) access_list,
+                              uint32_t nr, uint32_t start, uint32_t mask,
+                              unsigned int altp2m_idx);
+
+/*
+ * Get access type for a gfn.
+ * If gfn == INVALID_GFN, gets the default access type.
+ */
+int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access);
+
+#ifdef CONFIG_HAS_MEM_ACCESS
 int mem_access_memop(unsigned long cmd,
                      XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
-
 #else
-
 static inline
 int mem_access_memop(unsigned long cmd,
                      XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
 {
     return -ENOSYS;
 }
+#endif /* CONFIG_HAS_MEM_ACCESS */
 
-#endif /* HAS_MEM_ACCESS */
-
-#endif /* _XEN_ASM_MEM_ACCESS_H */
+#endif /* _XEN_MEM_ACCESS_H */
 
 /*
  * Local variables:
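
The bottom three bits of p2m_access_t form a straight permission
bitmask, which is what makes violation checks cheap. A schematic check
(not the actual Xen fault-handling code; the special rx2rw/n2rwx types
need extra handling on top):

    static bool_t access_violation(p2m_access_t a, const struct npfec npfec)
    {
        return (npfec.read_access  && !(a & p2m_access_r)) ||
               (npfec.write_access && !(a & p2m_access_w)) ||
               (npfec.insn_fetch   && !(a & p2m_access_x));
    }
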
diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h
index 3be1e91..8cd5a6b 100644
--- a/xen/include/xen/p2m-common.h
+++ b/xen/include/xen/p2m-common.h
@@ -1,38 +1,6 @@
 #ifndef _XEN_P2M_COMMON_H
 #define _XEN_P2M_COMMON_H
 
-#include <public/vm_event.h>
-
-/*
- * Additional access types, which are used to further restrict
- * the permissions given by the p2m_type_t memory type.  Violations
- * caused by p2m_access_t restrictions are sent to the vm_event
- * interface.
- *
- * The access permissions are soft state: when any ambiguous change of page
- * type or use occurs, or when pages are flushed, swapped, or at any other
- * convenient time, the access permissions can get reset to the p2m_domain
- * default.
- */
-typedef enum {
-    /* Code uses bottom three bits with bitmask semantics */
-    p2m_access_n     = 0, /* No access allowed. */
-    p2m_access_r     = 1 << 0,
-    p2m_access_w     = 1 << 1,
-    p2m_access_x     = 1 << 2,
-    p2m_access_rw    = p2m_access_r | p2m_access_w,
-    p2m_access_rx    = p2m_access_r | p2m_access_x,
-    p2m_access_wx    = p2m_access_w | p2m_access_x,
-    p2m_access_rwx   = p2m_access_r | p2m_access_w | p2m_access_x,
-
-    p2m_access_rx2rw = 8, /* Special: page goes from RX to RW on write */
-    p2m_access_n2rwx = 9, /* Special: page goes from N to RWX on access, *
-                           * generates an event but does not pause the
-                           * vcpu */
-
-    /* NOTE: Assumed to be only 4 bits right now on x86. */
-} p2m_access_t;
-
 /* Map MMIO regions in the p2m: start_gfn and nr describe the range in
  * the guest physical address space to map, starting from the machine
  * frame number mfn. */
@@ -45,24 +13,4 @@ int unmap_mmio_regions(struct domain *d,
                        unsigned long nr,
                        mfn_t mfn);
 
-/*
- * Set access type for a region of gfns.
- * If gfn == INVALID_GFN, sets the default access type.
- */
-long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
-                        uint32_t start, uint32_t mask, xenmem_access_t access,
-                        unsigned int altp2m_idx);
-
-long p2m_set_mem_access_multi(struct domain *d,
-                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
-                              const XEN_GUEST_HANDLE(const_uint8) access_list,
-                              uint32_t nr, uint32_t start, uint32_t mask,
-                              unsigned int altp2m_idx);
-
-/*
- * Get access type for a gfn.
- * If gfn == INVALID_GFN, gets the default access type.
- */
-int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access);
-
 #endif /* _XEN_P2M_COMMON_H */
-- 
2.10.2


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


* Re: [PATCH] p2m: split mem_access into separate files
  2016-12-08 22:57 [PATCH] p2m: split mem_access into separate files Tamas K Lengyel
@ 2016-12-09  8:35 ` Razvan Cojocaru
  2016-12-09  9:27 ` Jan Beulich
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 8+ messages in thread
From: Razvan Cojocaru @ 2016-12-09  8:35 UTC (permalink / raw)
  To: Tamas K Lengyel, xen-devel
  Cc: George Dunlap, Andrew Cooper, Julien Grall, Stefano Stabellini,
	Jan Beulich

On 12/09/2016 12:57 AM, Tamas K Lengyel wrote:
> This patch relocates mem_access components that are currently mixed with p2m
> code into separate files. This better aligns the code with similar subsystems,
> such as mem_sharing and mem_paging, which are already in separate files. There
> are no code-changes introduced, the patch is mechanical code movement.
> 
> On ARM we also relocate the static inline gfn_next_boundary function to p2m.h
> as it is a function the mem_access code needs access to.
> 
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>

Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>


* Re: [PATCH] p2m: split mem_access into separate files
  2016-12-08 22:57 [PATCH] p2m: split mem_access into separate files Tamas K Lengyel
  2016-12-09  8:35 ` Razvan Cojocaru
@ 2016-12-09  9:27 ` Jan Beulich
  2016-12-09 18:38   ` Tamas K Lengyel
  2016-12-09 10:08 ` Julien Grall
  2016-12-13 10:08 ` George Dunlap
  3 siblings, 1 reply; 8+ messages in thread
From: Jan Beulich @ 2016-12-09  9:27 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: Stefano Stabellini, Razvan Cojocaru, George Dunlap,
	Andrew Cooper, Julien Grall, xen-devel

>>> On 08.12.16 at 23:57, <tamas.lengyel@zentific.com> wrote:
> --- a/xen/arch/x86/mm/Makefile
> +++ b/xen/arch/x86/mm/Makefile
> @@ -9,6 +9,7 @@ obj-y += guest_walk_3.o
>  obj-y += guest_walk_4.o
>  obj-y += mem_paging.o
>  obj-y += mem_sharing.o
> +obj-y += mem_access.o

Please honor prior (mostly?) alphabetical ordering.

> --- a/xen/common/mem_access.c
> +++ b/xen/common/mem_access.c
> @@ -24,8 +24,9 @@
>  #include <xen/guest_access.h>
>  #include <xen/hypercall.h>
>  #include <xen/vm_event.h>
> +#include <xen/mem_access.h>
>  #include <public/memory.h>
> -#include <asm/p2m.h>
> +#include <asm/mem_access.h>
>  #include <xsm/xsm.h>

Normally asm/ includes xen/ of the same name or the other way
around, depending on how they relate to one another; you
shouldn't ever need both includes, and I'd be surprised if the
two headers really are (even conceptually) completely
independent of each other.

Otherwise this all looks like pure code motion (except for the
adjustments described), but it would be nice if you could
clarify that's indeed (intended to be) the case.

Jan



* Re: [PATCH] p2m: split mem_access into separate files
  2016-12-08 22:57 [PATCH] p2m: split mem_access into separate files Tamas K Lengyel
  2016-12-09  8:35 ` Razvan Cojocaru
  2016-12-09  9:27 ` Jan Beulich
@ 2016-12-09 10:08 ` Julien Grall
  2016-12-13 10:08 ` George Dunlap
  3 siblings, 0 replies; 8+ messages in thread
From: Julien Grall @ 2016-12-09 10:08 UTC (permalink / raw)
  To: Tamas K Lengyel, xen-devel
  Cc: George Dunlap, Andrew Cooper, Stefano Stabellini, Jan Beulich,
	Razvan Cojocaru

Hi Tamas,

On 08/12/16 22:57, Tamas K Lengyel wrote:
> This patch relocates mem_access components that are currently mixed with p2m
> code into separate files. This better aligns the code with similar subsystems,
> such as mem_sharing and mem_paging, which are already in separate files. There
> are no code-changes introduced, the patch is mechanical code movement.

Whilst I agree this is a good move in general, the ARM (both 32-bit and 
64-bit) code deserves more attention to make it work in *all* the cases. 
It would have been nice to show that by addressing my concerns when you 
first suggested this move (see [1]).

>
> On ARM we also relocate the static inline gfn_next_boundary function to p2m.h
> as it is a function the mem_access code needs access to.
>
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>

[...]

> +/*
> + * Look up the MFN corresponding to a domain's GFN.
> + * Look up the mem_access setting in the radix tree.
> + * The entry associated with the GFN is considered valid.
> + */
> +p2m_access_t p2m_mem_access_radix_get(struct p2m_domain *p2m, gfn_t gfn)

[...]

> +int p2m_mem_access_radix_set(struct p2m_domain *p2m, gfn_t gfn,
> +                             p2m_access_t a)

[...]

The radix tree functions should stay in p2m.c as they are tied to stage-2 
page table handling.

> +
> +/*
> + * If mem_access is in use, it might have been the reason why get_page_from_gva
> + * failed to fetch the page, as it uses the MMU for the permission checking.
> + * Only in these cases do we perform a software-based type check and fetch the
> + * page if we indeed found a conflicting mem_access setting.
> + */
> +struct page_info*
> +p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag)

This interface should take a vCPU as a parameter and not rely on current.

Also, p2m_mem_access_check_and_get_page uses the hardware to translate a 
VA to an IPA. This only works if the memory where the stage-1 page 
tables reside is not protected. The upcoming altp2m support will make 
things trickier.
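
To illustrate the two translation paths involved (gva_to_ipa is the real
helper the code calls; the other names here are hypothetical):

    struct page_info *sketch_check_and_get_page(vaddr_t gva, unsigned long flag)
    {
        paddr_t ipa;

        /*
         * Hardware walk: the MMU itself dereferences the guest's stage-1
         * tables. If mem_access restrictions cover the memory holding
         * those tables, this translation faults and the lookup fails.
         */
        if ( gva_to_ipa(gva, &ipa, flag) < 0 )
            return NULL;

        /* Software side: consult the mem_access setting for the IPA and
         * fetch the page only if that setting explains the earlier
         * failure. (Hypothetical helper standing in for the real logic.) */
        return software_check_and_fetch(ipa, flag);
    }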

[1] 
https://lists.xenproject.org/archives/html/xen-devel/2016-08/msg00037.html

-- 
Julien Grall


* Re: [PATCH] p2m: split mem_access into separate files
  2016-12-09  9:27 ` Jan Beulich
@ 2016-12-09 18:38   ` Tamas K Lengyel
  2016-12-12  7:08     ` Jan Beulich
  0 siblings, 1 reply; 8+ messages in thread
From: Tamas K Lengyel @ 2016-12-09 18:38 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Razvan Cojocaru, George Dunlap,
	Andrew Cooper, Julien Grall, xen-devel

On Fri, Dec 9, 2016 at 2:27 AM, Jan Beulich <JBeulich@suse.com> wrote:
>>>> On 08.12.16 at 23:57, <tamas.lengyel@zentific.com> wrote:
>> --- a/xen/arch/x86/mm/Makefile
>> +++ b/xen/arch/x86/mm/Makefile
>> @@ -9,6 +9,7 @@ obj-y += guest_walk_3.o
>>  obj-y += guest_walk_4.o
>>  obj-y += mem_paging.o
>>  obj-y += mem_sharing.o
>> +obj-y += mem_access.o
>
> Please honor prior (mostly?) alphabetical ordering.

I don't think there is any alphabetical ordering here. The list begins
with paging.o, then goes to altp2m.o and then to guest_walk_2.o. IMHO,
sorting the list is something that should be done in a separate patch.

>
>> --- a/xen/common/mem_access.c
>> +++ b/xen/common/mem_access.c
>> @@ -24,8 +24,9 @@
>>  #include <xen/guest_access.h>
>>  #include <xen/hypercall.h>
>>  #include <xen/vm_event.h>
>> +#include <xen/mem_access.h>
>>  #include <public/memory.h>
>> -#include <asm/p2m.h>
>> +#include <asm/mem_access.h>
>>  #include <xsm/xsm.h>
>
> Normally asm/ includes xen/ of the same name or the other way
> around, depending on how they relate to one another; you
> shouldn't ever need both includes, and I'd be surprised if the
> two headers really are (even conceptually) completely
> independent of each other.

Sure, xen/mem_access.h can include the asm specific one.
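
As a sketch, the agreed layering might end up looking like this (the
exact v2 shape is an assumption, not a quote of a later revision):

    /* xen/include/xen/mem_access.h */
    #ifndef _XEN_MEM_ACCESS_H
    #define _XEN_MEM_ACCESS_H

    #include <public/memory.h>
    #include <public/vm_event.h>

    /* ... common p2m_access_t definition and prototypes, as in this patch ... */

    #include <asm/mem_access.h>   /* arch-specific hooks via the common header */

    #endif /* _XEN_MEM_ACCESS_H */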

>
> Otherwise this all looks like pure code motion (except for the
> adjustments described), but it would be nice if you could
> clarify that's indeed (intended to be) the case.

I do say in the commit message this is mechanical code motion: "There
are no code-changes introduced, the patch is mechanical code
movement."

>
> Jan
>

Thanks,
Tamas


* Re: [PATCH] p2m: split mem_access into separate files
  2016-12-09 18:38   ` Tamas K Lengyel
@ 2016-12-12  7:08     ` Jan Beulich
  2016-12-12  7:55       ` Tamas K Lengyel
  0 siblings, 1 reply; 8+ messages in thread
From: Jan Beulich @ 2016-12-12  7:08 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: Stefano Stabellini, Razvan Cojocaru, George Dunlap,
	Andrew Cooper, Julien Grall, xen-devel

>>> On 09.12.16 at 19:38, <tamas.lengyel@zentific.com> wrote:
> On Fri, Dec 9, 2016 at 2:27 AM, Jan Beulich <JBeulich@suse.com> wrote:
>>>>> On 08.12.16 at 23:57, <tamas.lengyel@zentific.com> wrote:
>>> --- a/xen/arch/x86/mm/Makefile
>>> +++ b/xen/arch/x86/mm/Makefile
>>> @@ -9,6 +9,7 @@ obj-y += guest_walk_3.o
>>>  obj-y += guest_walk_4.o
>>>  obj-y += mem_paging.o
>>>  obj-y += mem_sharing.o
>>> +obj-y += mem_access.o
>>
>> Please honor prior (mostly?) alphabetical ordering.
> 
> I don't think there is any alphabetical ordering here. The list begins
> with paging.o then goes to altp2m.o and then to guest_walk_2.o.. IMHO
> sorting the list is something that should be done in a separate patch.

Hence my "(mostly?)" - the patch context alone shows that you could
at least not make things worse.

>> Otherwise this all looks like pure code motion (except for the
>> adjustments described), but it would be nice if you could
>> clarify that's indeed (intended to be) the case.
> 
> I do say in the commit message this is mechanical code motion: "There
> are no code-changes introduced, the patch is mechanical code
> movement."

Oh, I'm sorry - I'm sure I've looked over the description a second
time before making the comment, but I still must have skipped
that last sentence in the first paragraph.

Jan



* Re: [PATCH] p2m: split mem_access into separate files
  2016-12-12  7:08     ` Jan Beulich
@ 2016-12-12  7:55       ` Tamas K Lengyel
  0 siblings, 0 replies; 8+ messages in thread
From: Tamas K Lengyel @ 2016-12-12  7:55 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Razvan Cojocaru, George Dunlap,
	Andrew Cooper, Julien Grall, xen-devel


On Dec 12, 2016 00:08, "Jan Beulich" <JBeulich@suse.com> wrote:

>>> On 09.12.16 at 19:38, <tamas.lengyel@zentific.com> wrote:
> On Fri, Dec 9, 2016 at 2:27 AM, Jan Beulich <JBeulich@suse.com> wrote:
>>>>> On 08.12.16 at 23:57, <tamas.lengyel@zentific.com> wrote:
>>> --- a/xen/arch/x86/mm/Makefile
>>> +++ b/xen/arch/x86/mm/Makefile
>>> @@ -9,6 +9,7 @@ obj-y += guest_walk_3.o
>>>  obj-y += guest_walk_4.o
>>>  obj-y += mem_paging.o
>>>  obj-y += mem_sharing.o
>>> +obj-y += mem_access.o
>>
>> Please honor prior (mostly?) alphabetical ordering.
>
> I don't think there is any alphabetical ordering here. The list begins
> with paging.o then goes to altp2m.o and then to guest_walk_2.o.. IMHO
> sorting the list is something that should be done in a separate patch.

Hence my "(partly?)" - the patch context alone tells that you could
at least not make things worse.


I think this is one of those changes that I would rather not submit a new
version for unless something else comes up as well.


>> Otherwise this all looks like pure code motion (except for the
>> adjustments described), but it would be nice if you could
>> clarify that's indeed (intended to be) the case.
>
> I do say in the commit message this is mechanical code motion: "There
> are no code-changes introduced, the patch is mechanical code
> movement."

Oh, I'm sorry - I'm sure I've looked over the description a 2nd
time before making the comment, but I still must have skipped
that last sentence in the first paragraph.


No problem ;)

Thanks,
Tamas


* Re: [PATCH] p2m: split mem_access into separate files
  2016-12-08 22:57 [PATCH] p2m: split mem_access into separate files Tamas K Lengyel
                   ` (2 preceding siblings ...)
  2016-12-09 10:08 ` Julien Grall
@ 2016-12-13 10:08 ` George Dunlap
  3 siblings, 0 replies; 8+ messages in thread
From: George Dunlap @ 2016-12-13 10:08 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: Stefano Stabellini, Razvan Cojocaru, Andrew Cooper,
	George Dunlap, Julien Grall, Jan Beulich, xen-devel


> On Dec 9, 2016, at 6:57 AM, Tamas K Lengyel <tamas.lengyel@zentific.com> wrote:
> 
> This patch relocates mem_access components that are currently mixed with p2m
> code into separate files. This better aligns the code with similar subsystems,
> such as mem_sharing and mem_paging, which are already in separate files. There
> are no code-changes introduced, the patch is mechanical code movement.
> 
> On ARM we also relocate the static inline gfn_next_boundary function to p2m.h
> as it is a function the mem_access code needs access to.
> 
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>

Acked-by: George Dunlap <george.dunlap@citrix.com>


