* [PATCH for 4.10 0/3] XSA-243 followup
@ 2017-10-12 13:54 Andrew Cooper
  2017-10-12 13:54 ` [PATCH 1/3] Revert "x86/mm: move PV l4 table setup code" and "x86/mm: factor out pv_arch_init_memory" Andrew Cooper
                   ` (4 more replies)
  0 siblings, 5 replies; 16+ messages in thread
From: Andrew Cooper @ 2017-10-12 13:54 UTC (permalink / raw)
  To: Xen-devel
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Tim Deegan, Julien Grall,
	Jan Beulich

The important change here is in patch 3, which is intended to remove the
correct-but-dangerous-looking construction of linear pagetable slots for
shadowed guests.
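
For readers unfamiliar with the construct: a "linear" slot is a pagetable
entry pointing back at a pagetable of the same level, which makes the
pagetables themselves addressable through ordinary virtual addresses.  As a
minimal sketch (using only helpers which appear in the patches below), the
self-linear case is:

    l4_pgentry_t *l4t = map_domain_page(l4mfn);

    /* Point the linear slot back at this very L4, read/write. */
    l4t[l4_table_offset(LINEAR_PT_VIRT_START)] =
        l4e_from_mfn(l4mfn, __PAGE_HYPERVISOR_RW);

    unmap_domain_page(l4t);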

Andrew Cooper (3):
  Revert "x86/mm: move PV l4 table setup code" and "x86/mm: factor out
    pv_arch_init_memory"
  x86/mm: Consolidate all Xen L2 slot writing into
    init_xen_pae_l2_slots()
  x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()

 xen/arch/x86/mm.c              | 144 ++++++++++++++++++++++++++++++++++++---
 xen/arch/x86/mm/hap/hap.c      |  31 ++-------
 xen/arch/x86/mm/shadow/multi.c | 148 +++++++++++------------------------------
 xen/arch/x86/pv/dom0_build.c   |   5 +-
 xen/arch/x86/pv/domain.c       |   7 +-
 xen/arch/x86/pv/mm.c           |  82 -----------------------
 xen/arch/x86/pv/mm.h           |   3 -
 xen/include/asm-x86/mm.h       |   3 +
 xen/include/asm-x86/pv/mm.h    |   4 --
 9 files changed, 187 insertions(+), 240 deletions(-)

-- 
2.1.4



* [PATCH 1/3] Revert "x86/mm: move PV l4 table setup code" and "x86/mm: factor out pv_arch_init_memory"
  2017-10-12 13:54 [PATCH for 4.10 0/3] XSA-243 followup Andrew Cooper
@ 2017-10-12 13:54 ` Andrew Cooper
  2017-10-12 14:46   ` Wei Liu
  2017-10-12 14:55   ` Jan Beulich
  2017-10-12 13:54 ` [PATCH 2/3] x86/mm: Consolidate all Xen L2 slot writing into init_xen_pae_l2_slots() Andrew Cooper
                   ` (3 subsequent siblings)
  4 siblings, 2 replies; 16+ messages in thread
From: Andrew Cooper @ 2017-10-12 13:54 UTC (permalink / raw)
  To: Xen-devel; +Cc: Andrew Cooper, Julien Grall, Wei Liu, Jan Beulich

This reverts commits f3b95fd07fdb55b1db091fede1b9a7c71f1eaa1b and
1bd39738a5a34f529a610fb275cc83ee5ac7547a.

The following patches (post-XSA-243 fixes) require init_guest_l4_table()
to be common code.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wei.liu2@citrix.com>
CC: Julien Grall <julien.grall@arm.com>

These pagetable fixes have been pending for longer than the mm cleanup being
reverted here.  Had I been in the office when these patches were proposed, I
would have quietly arranged for them not to be committed.
---
 xen/arch/x86/mm.c            | 77 +++++++++++++++++++++++++++++++++++++++--
 xen/arch/x86/pv/dom0_build.c |  2 --
 xen/arch/x86/pv/domain.c     |  5 ---
 xen/arch/x86/pv/mm.c         | 82 --------------------------------------------
 xen/arch/x86/pv/mm.h         |  3 --
 xen/include/asm-x86/mm.h     |  2 ++
 xen/include/asm-x86/pv/mm.h  |  4 ---
 7 files changed, 77 insertions(+), 98 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 5628bc7..f90a42a 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -125,7 +125,6 @@
 
 #include <asm/hvm/grant_table.h>
 #include <asm/pv/grant_table.h>
-#include <asm/pv/mm.h>
 
 #include "pv/mm.h"
 
@@ -243,6 +242,14 @@ void __init init_frametable(void)
     memset(end_pg, -1, (unsigned long)top_pg - (unsigned long)end_pg);
 }
 
+#ifndef NDEBUG
+static unsigned int __read_mostly root_pgt_pv_xen_slots
+    = ROOT_PAGETABLE_PV_XEN_SLOTS;
+static l4_pgentry_t __read_mostly split_l4e;
+#else
+#define root_pgt_pv_xen_slots ROOT_PAGETABLE_PV_XEN_SLOTS
+#endif
+
 void __init arch_init_memory(void)
 {
     unsigned long i, pfn, rstart_pfn, rend_pfn, iostart_pfn, ioend_pfn;
@@ -338,7 +345,39 @@ void __init arch_init_memory(void)
 
     mem_sharing_init();
 
-    pv_arch_init_memory();
+#ifndef NDEBUG
+    if ( highmem_start )
+    {
+        unsigned long split_va = (unsigned long)__va(highmem_start);
+
+        if ( split_va < HYPERVISOR_VIRT_END &&
+             split_va - 1 == (unsigned long)__va(highmem_start - 1) )
+        {
+            root_pgt_pv_xen_slots = l4_table_offset(split_va) -
+                                    ROOT_PAGETABLE_FIRST_XEN_SLOT;
+            ASSERT(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS);
+            if ( l4_table_offset(split_va) == l4_table_offset(split_va - 1) )
+            {
+                l3_pgentry_t *l3tab = alloc_xen_pagetable();
+
+                if ( l3tab )
+                {
+                    const l3_pgentry_t *l3idle =
+                        l4e_to_l3e(idle_pg_table[l4_table_offset(split_va)]);
+
+                    for ( i = 0; i < l3_table_offset(split_va); ++i )
+                        l3tab[i] = l3idle[i];
+                    for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
+                        l3tab[i] = l3e_empty();
+                    split_l4e = l4e_from_pfn(virt_to_mfn(l3tab),
+                                             __PAGE_HYPERVISOR_RW);
+                }
+                else
+                    ++root_pgt_pv_xen_slots;
+            }
+        }
+    }
+#endif
 }
 
 int page_is_ram_type(unsigned long mfn, unsigned long mem_type)
@@ -1479,6 +1518,40 @@ static int alloc_l3_table(struct page_info *page)
     return rc > 0 ? 0 : rc;
 }
 
+/*
+ * This function must write all ROOT_PAGETABLE_PV_XEN_SLOTS, to clobber any
+ * values a guest may have left there from alloc_l4_table().
+ */
+void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
+                         bool zap_ro_mpt)
+{
+    /* Xen private mappings. */
+    memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+           root_pgt_pv_xen_slots * sizeof(l4_pgentry_t));
+#ifndef NDEBUG
+    if ( unlikely(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS) )
+    {
+        l4_pgentry_t *next = &l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT +
+                                    root_pgt_pv_xen_slots];
+
+        if ( l4e_get_intpte(split_l4e) )
+            *next++ = split_l4e;
+
+        memset(next, 0,
+               _p(&l4tab[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
+    }
+#else
+    BUILD_BUG_ON(root_pgt_pv_xen_slots != ROOT_PAGETABLE_PV_XEN_SLOTS);
+#endif
+    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
+        l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
+    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
+        l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
+    if ( zap_ro_mpt || is_pv_32bit_domain(d) )
+        l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
+}
+
 bool fill_ro_mpt(mfn_t mfn)
 {
     l4_pgentry_t *l4tab = map_domain_page(mfn);
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index dcbee43..ec7f96d 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -20,8 +20,6 @@
 #include <asm/page.h>
 #include <asm/setup.h>
 
-#include "mm.h"
-
 /* Allow ring-3 access in long mode as guest cannot use ring 1 ... */
 #define BASE_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
 #define L1_PROT (BASE_PROT|_PAGE_GUEST_KERNEL)
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 90d5569..c8b9cb6 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -9,13 +9,8 @@
 #include <xen/lib.h>
 #include <xen/sched.h>
 
-#include <asm/p2m.h>
-#include <asm/paging.h>
-#include <asm/setup.h>
 #include <asm/pv/domain.h>
 
-#include "mm.h"
-
 /* Override macros from asm/page.h to make them work with mfn_t */
 #undef mfn_to_page
 #define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c
index e45d628..6890e80 100644
--- a/xen/arch/x86/pv/mm.c
+++ b/xen/arch/x86/pv/mm.c
@@ -23,7 +23,6 @@
 
 #include <asm/current.h>
 #include <asm/p2m.h>
-#include <asm/setup.h>
 
 #include "mm.h"
 
@@ -134,87 +133,6 @@ bool pv_map_ldt_shadow_page(unsigned int offset)
     return true;
 }
 
-#ifndef NDEBUG
-static unsigned int __read_mostly root_pgt_pv_xen_slots
-    = ROOT_PAGETABLE_PV_XEN_SLOTS;
-static l4_pgentry_t __read_mostly split_l4e;
-#else
-#define root_pgt_pv_xen_slots ROOT_PAGETABLE_PV_XEN_SLOTS
-#endif
-
-/*
- * This function must write all ROOT_PAGETABLE_PV_XEN_SLOTS, to clobber any
- * values a guest may have left there from alloc_l4_table().
- */
-void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
-                         bool zap_ro_mpt)
-{
-    /* Xen private mappings. */
-    memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           root_pgt_pv_xen_slots * sizeof(l4_pgentry_t));
-#ifndef NDEBUG
-    if ( unlikely(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS) )
-    {
-        l4_pgentry_t *next = &l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT +
-                                    root_pgt_pv_xen_slots];
-
-        if ( l4e_get_intpte(split_l4e) )
-            *next++ = split_l4e;
-
-        memset(next, 0,
-               _p(&l4tab[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
-    }
-#else
-    BUILD_BUG_ON(root_pgt_pv_xen_slots != ROOT_PAGETABLE_PV_XEN_SLOTS);
-#endif
-    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
-    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
-    if ( zap_ro_mpt || is_pv_32bit_domain(d) )
-        l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
-}
-
-void pv_arch_init_memory(void)
-{
-#ifndef NDEBUG
-    unsigned int i;
-
-    if ( highmem_start )
-    {
-        unsigned long split_va = (unsigned long)__va(highmem_start);
-
-        if ( split_va < HYPERVISOR_VIRT_END &&
-             split_va - 1 == (unsigned long)__va(highmem_start - 1) )
-        {
-            root_pgt_pv_xen_slots = l4_table_offset(split_va) -
-                                    ROOT_PAGETABLE_FIRST_XEN_SLOT;
-            ASSERT(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS);
-            if ( l4_table_offset(split_va) == l4_table_offset(split_va - 1) )
-            {
-                l3_pgentry_t *l3tab = alloc_xen_pagetable();
-
-                if ( l3tab )
-                {
-                    const l3_pgentry_t *l3idle =
-                        l4e_to_l3e(idle_pg_table[l4_table_offset(split_va)]);
-
-                    for ( i = 0; i < l3_table_offset(split_va); ++i )
-                        l3tab[i] = l3idle[i];
-                    for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
-                        l3tab[i] = l3e_empty();
-                    split_l4e = l4e_from_pfn(virt_to_mfn(l3tab),
-                                             __PAGE_HYPERVISOR_RW);
-                }
-                else
-                    ++root_pgt_pv_xen_slots;
-            }
-        }
-    }
-#endif
-}
-
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/pv/mm.h b/xen/arch/x86/pv/mm.h
index 169c9e0..7502d53 100644
--- a/xen/arch/x86/pv/mm.h
+++ b/xen/arch/x86/pv/mm.h
@@ -3,9 +3,6 @@
 
 l1_pgentry_t *map_guest_l1e(unsigned long linear, mfn_t *gl1mfn);
 
-void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
-                         bool zap_ro_mpt);
-
 int new_guest_cr3(mfn_t mfn);
 
 /* Read a PV guest's l1e that maps this linear address. */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 26f0153..eeac4d7 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -340,6 +340,8 @@ static inline void *__page_to_virt(const struct page_info *pg)
 int free_page_type(struct page_info *page, unsigned long type,
                    int preemptible);
 
+void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
+                         bool_t zap_ro_mpt);
 bool fill_ro_mpt(mfn_t mfn);
 void zap_ro_mpt(mfn_t mfn);
 
diff --git a/xen/include/asm-x86/pv/mm.h b/xen/include/asm-x86/pv/mm.h
index 07785e0..5d2fe4c 100644
--- a/xen/include/asm-x86/pv/mm.h
+++ b/xen/include/asm-x86/pv/mm.h
@@ -30,8 +30,6 @@ void pv_destroy_gdt(struct vcpu *v);
 
 bool pv_map_ldt_shadow_page(unsigned int off);
 
-void pv_arch_init_memory(void);
-
 #else
 
 #include <xen/errno.h>
@@ -51,8 +49,6 @@ static inline void pv_destroy_gdt(struct vcpu *v) { ASSERT_UNREACHABLE(); }
 
 static inline bool pv_map_ldt_shadow_page(unsigned int off) { return false; }
 
-static inline void pv_arch_init_memory(void) {}
-
 #endif
 
 #endif /* __X86_PV_MM_H__ */
-- 
2.1.4



* [PATCH 2/3] x86/mm: Consolidate all Xen L2 slot writing into init_xen_pae_l2_slots()
  2017-10-12 13:54 [PATCH for 4.10 0/3] XSA-243 followup Andrew Cooper
  2017-10-12 13:54 ` [PATCH 1/3] Revert "x86/mm: move PV l4 table setup code" and "x86/mm: factor out pv_arch_init_memory" Andrew Cooper
@ 2017-10-12 13:54 ` Andrew Cooper
  2017-10-12 14:57   ` Jan Beulich
                     ` (2 more replies)
  2017-10-12 13:54 ` [PATCH 3/3] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots() Andrew Cooper
                   ` (2 subsequent siblings)
  4 siblings, 3 replies; 16+ messages in thread
From: Andrew Cooper @ 2017-10-12 13:54 UTC (permalink / raw)
  To: Xen-devel
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Tim Deegan, Julien Grall,
	Jan Beulich

Having all of this logic together makes it easier to follow Xen's virtual
setup across the whole system.

No functional change.
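
For reference, each call site then reduces to a map/initialise/unmap triple;
a sketch based on the shadow hunk below:

    shadow_l2e_t *l2t = map_domain_page(smfn);

    /* Copy the compat Xen mappings into this PAE l2h shadow. */
    init_xen_pae_l2_slots(l2t, d);
    unmap_domain_page(l2t);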

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: George Dunlap <george.dunlap@eu.citrix.com>
CC: Wei Liu <wei.liu2@citrix.com>
CC: Julien Grall <julien.grall@arm.com>
---
 xen/arch/x86/mm.c              | 16 +++++++++-------
 xen/arch/x86/mm/shadow/multi.c | 42 +++++++++++++++---------------------------
 xen/include/asm-x86/mm.h       |  1 +
 3 files changed, 25 insertions(+), 34 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index f90a42a..ea4af16 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1433,13 +1433,7 @@ static int alloc_l2_table(struct page_info *page, unsigned long type,
     }
 
     if ( rc >= 0 && (type & PGT_pae_xen_l2) )
-    {
-        /* Xen private mappings. */
-        memcpy(&pl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
-               &compat_idle_pg_table_l2[
-                   l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
-               COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*pl2e));
-    }
+        init_xen_pae_l2_slots(pl2e, d);
 
     unmap_domain_page(pl2e);
     return rc > 0 ? 0 : rc;
@@ -1518,6 +1512,14 @@ static int alloc_l3_table(struct page_info *page)
     return rc > 0 ? 0 : rc;
 }
 
+void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d)
+{
+    memcpy(&l2t[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
+           &compat_idle_pg_table_l2[
+               l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
+           COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*l2t));
+}
+
 /*
  * This function must write all ROOT_PAGETABLE_PV_XEN_SLOTS, to clobber any
  * values a guest may have left there from alloc_l4_table().
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index d540af1..1b76e0c 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1521,31 +1521,6 @@ void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
 }
 #endif
 
-#if GUEST_PAGING_LEVELS >= 3
-// For 3-on-3 PV guests, we need to make sure the xen mappings are in
-// place, which means that we need to populate the l2h entry in the l3
-// table.
-
-static void sh_install_xen_entries_in_l2h(struct domain *d, mfn_t sl2hmfn)
-{
-    shadow_l2e_t *sl2e;
-
-    if ( !is_pv_32bit_domain(d) )
-        return;
-
-    sl2e = map_domain_page(sl2hmfn);
-    BUILD_BUG_ON(sizeof (l2_pgentry_t) != sizeof (shadow_l2e_t));
-
-    /* Copy the common Xen mappings from the idle domain */
-    memcpy(
-        &sl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
-        &compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
-        COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*sl2e));
-
-    unmap_domain_page(sl2e);
-}
-#endif
-
 
 /**************************************************************************/
 /* Create a shadow of a given guest page.
@@ -1610,7 +1585,14 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
 #endif
 #if GUEST_PAGING_LEVELS >= 3
         case SH_type_l2h_shadow:
-            sh_install_xen_entries_in_l2h(v->domain, smfn);
+            BUILD_BUG_ON(sizeof(l2_pgentry_t) != sizeof(shadow_l2e_t));
+            if ( is_pv_32bit_domain(d) )
+            {
+                shadow_l2e_t *l2t = map_domain_page(smfn);
+
+                init_xen_pae_l2_slots(l2t, d);
+                unmap_domain_page(l2t);
+            }
             break;
 #endif
         default: /* Do nothing */ break;
@@ -1677,6 +1659,8 @@ sh_make_monitor_table(struct vcpu *v)
 
             if ( is_pv_32bit_domain(d) )
             {
+                l2_pgentry_t *l2t;
+
                 /* For 32-bit PV guests, we need to map the 32-bit Xen
                  * area into its usual VAs in the monitor tables */
                 m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
@@ -1687,7 +1671,11 @@ sh_make_monitor_table(struct vcpu *v)
                 mfn_to_page(m2mfn)->shadow_flags = 2;
                 l3e = map_domain_page(m3mfn);
                 l3e[3] = l3e_from_mfn(m2mfn, _PAGE_PRESENT);
-                sh_install_xen_entries_in_l2h(d, m2mfn);
+
+                l2t = map_domain_page(m2mfn);
+                init_xen_pae_l2_slots(l2t, d);
+                unmap_domain_page(l2t);
+
                 unmap_domain_page(l3e);
             }
 
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index eeac4d7..da3c5e2 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -340,6 +340,7 @@ static inline void *__page_to_virt(const struct page_info *pg)
 int free_page_type(struct page_info *page, unsigned long type,
                    int preemptible);
 
+void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d);
 void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
                          bool_t zap_ro_mpt);
 bool fill_ro_mpt(mfn_t mfn);
-- 
2.1.4



* [PATCH 3/3] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()
  2017-10-12 13:54 [PATCH for 4.10 0/3] XSA-243 followup Andrew Cooper
  2017-10-12 13:54 ` [PATCH 1/3] Revert "x86/mm: move PV l4 table setup code" and "x86/mm: factor out pv_arch_init_memory" Andrew Cooper
  2017-10-12 13:54 ` [PATCH 2/3] x86/mm: Consolidate all Xen L2 slot writing into init_xen_pae_l2_slots() Andrew Cooper
@ 2017-10-12 13:54 ` Andrew Cooper
  2017-10-12 15:08   ` Jan Beulich
                     ` (2 more replies)
  2017-10-13  9:47 ` [PATCH for 4.10 0/3] XSA-243 followup Julien Grall
  2017-10-14 17:45 ` Tim Deegan
  4 siblings, 3 replies; 16+ messages in thread
From: Andrew Cooper @ 2017-10-12 13:54 UTC (permalink / raw)
  To: Xen-devel
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Tim Deegan, Julien Grall,
	Jan Beulich

There are currently three functions which write L4 pagetables for Xen, but
they all behave subtly differently.  sh_install_xen_entries_in_l4() in
particular is catering for two different usecases, which makes the safety of
the linear mappings hard to follow.

By consolidating the L4 pagetable writing in a single function, the resulting
setup of Xen's virtual layout is easier to understand.

No practical changes to the resulting L4, although the logic has been
rearranged to avoid rewriting some slots.  This changes the zap_ro_mpt
parameter to simply ro_mpt.

Both {hap,sh}_install_xen_entries_in_l4() get folded into their callers.  The
hap side has only a single caller, while the shadow side has two.  The shadow
split helps highlight the correctness of the linear slots.
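
To illustrate the two configurations, a sketch taken from the hunks below:

    /* Shadowed 4-level guest: the guest-linear slot points at the guest's
     * L4, and the shadow-linear slot at the shadow itself. */
    init_xen_l4_slots(l4t, gmfn, d, smfn, (!is_pv_32bit_domain(d) &&
                                           VM_ASSIST(d, m2p_strict)));

    /* Monitor table or PV guest's own L4: self-linear, no shadow-linear. */
    init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);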

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: George Dunlap <george.dunlap@eu.citrix.com>
CC: Wei Liu <wei.liu2@citrix.com>
CC: Julien Grall <julien.grall@arm.com>
---
 xen/arch/x86/mm.c              |  87 +++++++++++++++++++++++++--------
 xen/arch/x86/mm/hap/hap.c      |  31 +++---------
 xen/arch/x86/mm/shadow/multi.c | 106 ++++++++++-------------------------------
 xen/arch/x86/pv/dom0_build.c   |   3 +-
 xen/arch/x86/pv/domain.c       |   2 +-
 xen/include/asm-x86/mm.h       |   4 +-
 6 files changed, 105 insertions(+), 128 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index ea4af16..5097958 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1521,37 +1521,85 @@ void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d)
 }
 
 /*
+ * Fill an L4 with Xen entries.
+ *
  * This function must write all ROOT_PAGETABLE_PV_XEN_SLOTS, to clobber any
  * values a guest may have left there from alloc_l4_table().
+ *
+ * l4t and l4mfn are mandatory, but l4mfn doesn't need to be the mfn under
+ * *l4t.  All other parameters are optional and will either fill or zero the
+ * appropriate slots.  Pagetables not shared with guests will gain the
+ * extended directmap.
  */
-void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
-                         bool zap_ro_mpt)
+void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
+                       const struct domain *d, mfn_t sl4mfn, bool ro_mpt)
 {
-    /* Xen private mappings. */
-    memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           root_pgt_pv_xen_slots * sizeof(l4_pgentry_t));
+    /*
+     * PV vcpus need a shortened directmap.  HVM and Idle vcpus get the full
+     * directmap.
+     */
+    bool short_directmap = d && !paging_mode_external(d);
+
+    /* Slot 256: RO M2P (if applicable). */
+    l4t[l4_table_offset(RO_MPT_VIRT_START)] =
+        ro_mpt ? idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]
+               : l4e_empty();
+
+    /* Slot 257: PCI MMCFG. */
+    l4t[l4_table_offset(PCI_MCFG_VIRT_START)] =
+        idle_pg_table[l4_table_offset(PCI_MCFG_VIRT_START)];
+
+    /* Slot 258: Self linear mappings. */
+    ASSERT(!mfn_eq(l4mfn, INVALID_MFN));
+    l4t[l4_table_offset(LINEAR_PT_VIRT_START)] =
+        l4e_from_mfn(l4mfn, __PAGE_HYPERVISOR_RW);
+
+    /* Slot 259: Shadow linear mappings (if applicable). */
+    l4t[l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
+        mfn_eq(sl4mfn, INVALID_MFN) ? l4e_empty() :
+        l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR_RW);
+
+    /* Slot 260: Per-domain mappings (if applicable). */
+    l4t[l4_table_offset(PERDOMAIN_VIRT_START)] =
+        d ? l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW)
+          : l4e_empty();
+
+    /* Slot 261-: text/data/bss, RW M2P, vmap, frametable, directmap. */
 #ifndef NDEBUG
-    if ( unlikely(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS) )
+    if ( short_directmap &&
+         unlikely(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS) )
     {
-        l4_pgentry_t *next = &l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT +
-                                    root_pgt_pv_xen_slots];
+        /*
+         * If using highmem-start=, artificially shorten the directmap to
+         * simulate very large machines.
+         */
+        l4_pgentry_t *next;
+
+        memcpy(&l4t[l4_table_offset(XEN_VIRT_START)],
+               &idle_pg_table[l4_table_offset(XEN_VIRT_START)],
+               (ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots -
+                l4_table_offset(XEN_VIRT_START)) * sizeof(*l4t));
+
+        next = &l4t[ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots];
 
         if ( l4e_get_intpte(split_l4e) )
             *next++ = split_l4e;
 
         memset(next, 0,
-               _p(&l4tab[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
+               _p(&l4t[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
     }
-#else
-    BUILD_BUG_ON(root_pgt_pv_xen_slots != ROOT_PAGETABLE_PV_XEN_SLOTS);
+    else
 #endif
-    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
-    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
-    if ( zap_ro_mpt || is_pv_32bit_domain(d) )
-        l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
+    {
+        unsigned int slots = (short_directmap
+                              ? ROOT_PAGETABLE_PV_XEN_SLOTS
+                              : ROOT_PAGETABLE_XEN_SLOTS);
+
+        memcpy(&l4t[l4_table_offset(XEN_VIRT_START)],
+               &idle_pg_table[l4_table_offset(XEN_VIRT_START)],
+               (ROOT_PAGETABLE_FIRST_XEN_SLOT + slots -
+                l4_table_offset(XEN_VIRT_START)) * sizeof(*l4t));
+    }
 }
 
 bool fill_ro_mpt(mfn_t mfn)
@@ -1629,7 +1677,8 @@ static int alloc_l4_table(struct page_info *page)
 
     if ( rc >= 0 )
     {
-        init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
+        init_xen_l4_slots(pl4e, _mfn(pfn),
+                          d, INVALID_MFN, VM_ASSIST(d, m2p_strict));
         atomic_inc(&d->arch.pv_domain.nr_l4_pages);
         rc = 0;
     }
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index dc85e82..1e7e8d0 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -391,41 +391,24 @@ int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted)
     return 0;
 }
 
-static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
-{
-    struct domain *d = v->domain;
-    l4_pgentry_t *l4e;
-
-    l4e = map_domain_page(l4mfn);
-
-    /* Copy the common Xen mappings from the idle domain */
-    memcpy(&l4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
-
-    /* Install the per-domain mappings for this domain */
-    l4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
-
-    /* Install a linear mapping */
-    l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        l4e_from_mfn(l4mfn, __PAGE_HYPERVISOR_RW);
-
-    unmap_domain_page(l4e);
-}
-
 static mfn_t hap_make_monitor_table(struct vcpu *v)
 {
     struct domain *d = v->domain;
     struct page_info *pg;
+    l4_pgentry_t *l4e;
     mfn_t m4mfn;
 
     ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
 
     if ( (pg = hap_alloc(d)) == NULL )
         goto oom;
+
     m4mfn = page_to_mfn(pg);
-    hap_install_xen_entries_in_l4(v, m4mfn);
+    l4e = __map_domain_page(pg);
+
+    init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);
+    unmap_domain_page(l4e);
+
     return m4mfn;
 
  oom:
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 1b76e0c..b139d9f 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1448,80 +1448,6 @@ do {                                                                    \
 #endif
 
 
-
-/**************************************************************************/
-/* Functions to install Xen mappings and linear mappings in shadow pages */
-
-// XXX -- this function should probably be moved to shadow-common.c, but that
-//        probably wants to wait until the shadow types have been moved from
-//        shadow-types.h to shadow-private.h
-//
-#if GUEST_PAGING_LEVELS == 4
-void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
-{
-    shadow_l4e_t *sl4e;
-    unsigned int slots;
-
-    sl4e = map_domain_page(sl4mfn);
-    BUILD_BUG_ON(sizeof (l4_pgentry_t) != sizeof (shadow_l4e_t));
-
-    /* Copy the common Xen mappings from the idle domain */
-    slots = (shadow_mode_external(d)
-             ? ROOT_PAGETABLE_XEN_SLOTS
-             : ROOT_PAGETABLE_PV_XEN_SLOTS);
-    memcpy(&sl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           slots * sizeof(l4_pgentry_t));
-
-    /* Install the per-domain mappings for this domain */
-    sl4e[shadow_l4_table_offset(PERDOMAIN_VIRT_START)] =
-        shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
-                            __PAGE_HYPERVISOR_RW);
-
-    if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) &&
-         !VM_ASSIST(d, m2p_strict) )
-    {
-        /* open coded zap_ro_mpt(mfn_x(sl4mfn)): */
-        sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] = shadow_l4e_empty();
-    }
-
-    /*
-     * Linear mapping slots:
-     *
-     * Calling this function with gl4mfn == sl4mfn is used to construct a
-     * monitor table for translated domains.  In this case, gl4mfn forms the
-     * self-linear mapping (i.e. not pointing into the translated domain), and
-     * the shadow-linear slot is skipped.  The shadow-linear slot is either
-     * filled when constructing lower level monitor tables, or via
-     * sh_update_cr3() for 4-level guests.
-     *
-     * Calling this function with gl4mfn != sl4mfn is used for non-translated
-     * guests, where the shadow-linear slot is actually self-linear, and the
-     * guest-linear slot points into the guests view of its pagetables.
-     */
-    if ( shadow_mode_translate(d) )
-    {
-        ASSERT(mfn_eq(gl4mfn, sl4mfn));
-
-        sl4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
-            shadow_l4e_empty();
-    }
-    else
-    {
-        ASSERT(!mfn_eq(gl4mfn, sl4mfn));
-
-        sl4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
-            shadow_l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR_RW);
-    }
-
-    sl4e[shadow_l4_table_offset(LINEAR_PT_VIRT_START)] =
-        shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR_RW);
-
-    unmap_domain_page(sl4e);
-}
-#endif
-
-
 /**************************************************************************/
 /* Create a shadow of a given guest page.
  */
@@ -1580,8 +1506,16 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
         {
 #if GUEST_PAGING_LEVELS == 4
         case SH_type_l4_shadow:
-            sh_install_xen_entries_in_l4(v->domain, gmfn, smfn);
-            break;
+        {
+            shadow_l4e_t *l4t = map_domain_page(smfn);
+
+            BUILD_BUG_ON(sizeof(l4_pgentry_t) != sizeof(shadow_l4e_t));
+
+            init_xen_l4_slots(l4t, gmfn, d, smfn, (!is_pv_32bit_domain(d) &&
+                                                   VM_ASSIST(d, m2p_strict)));
+            unmap_domain_page(l4t);
+        }
+        break;
 #endif
 #if GUEST_PAGING_LEVELS >= 3
         case SH_type_l2h_shadow:
@@ -1632,19 +1566,27 @@ sh_make_monitor_table(struct vcpu *v)
 
     {
         mfn_t m4mfn;
+        l4_pgentry_t *l4e;
+
         m4mfn = shadow_alloc(d, SH_type_monitor_table, 0);
-        sh_install_xen_entries_in_l4(d, m4mfn, m4mfn);
-        /* Remember the level of this table */
         mfn_to_page(m4mfn)->shadow_flags = 4;
+
+        l4e = map_domain_page(m4mfn);
+
+        /*
+         * Create a self-linear mapping, but no shadow-linear mapping.  A
+         * shadow-linear mapping will either be inserted below when creating
+         * lower level monitor tables, or later in sh_update_cr3().
+         */
+        init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);
+
 #if SHADOW_PAGING_LEVELS < 4
         {
             mfn_t m3mfn, m2mfn;
-            l4_pgentry_t *l4e;
             l3_pgentry_t *l3e;
             /* Install an l3 table and an l2 table that will hold the shadow
              * linear map entries.  This overrides the linear map entry that
              * was installed by sh_install_xen_entries_in_l4. */
-            l4e = map_domain_page(m4mfn);
 
             m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
             mfn_to_page(m3mfn)->shadow_flags = 3;
@@ -1679,9 +1621,11 @@ sh_make_monitor_table(struct vcpu *v)
                 unmap_domain_page(l3e);
             }
 
-            unmap_domain_page(l4e);
         }
 #endif /* SHADOW_PAGING_LEVELS < 4 */
+
+        unmap_domain_page(l4e);
+
         return m4mfn;
     }
 }
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index ec7f96d..ecce175 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -588,7 +588,8 @@ int __init dom0_construct_pv(struct domain *d,
         l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
     }
     clear_page(l4tab);
-    init_guest_l4_table(l4tab, d, 0);
+    init_xen_l4_slots(l4tab, _mfn(virt_to_mfn(l4start)),
+                      d, INVALID_MFN, true);
     v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
     if ( is_pv_32bit_domain(d) )
         v->arch.guest_table_user = v->arch.guest_table;
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index c8b9cb6..a242037 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -35,7 +35,7 @@ static int setup_compat_l4(struct vcpu *v)
 
     l4tab = __map_domain_page(pg);
     clear_page(l4tab);
-    init_guest_l4_table(l4tab, v->domain, 1);
+    init_xen_l4_slots(l4tab, page_to_mfn(pg), v->domain, INVALID_MFN, false);
     unmap_domain_page(l4tab);
 
     /* This page needs to look like a pagetable so that it can be shadowed */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index da3c5e2..8362608 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -341,8 +341,8 @@ int free_page_type(struct page_info *page, unsigned long type,
                    int preemptible);
 
 void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d);
-void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
-                         bool_t zap_ro_mpt);
+void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
+                       const struct domain *d, mfn_t sl4mfn, bool ro_mpt);
 bool fill_ro_mpt(mfn_t mfn);
 void zap_ro_mpt(mfn_t mfn);
 
-- 
2.1.4



* Re: [PATCH 1/3] Revert "x86/mm: move PV l4 table setup code" and "x86/mm: factor out pv_arch_init_memory"
  2017-10-12 13:54 ` [PATCH 1/3] Revert "x86/mm: move PV l4 table setup code" and "x86/mm: factor out pv_arch_init_memory" Andrew Cooper
@ 2017-10-12 14:46   ` Wei Liu
  2017-10-12 14:55   ` Jan Beulich
  1 sibling, 0 replies; 16+ messages in thread
From: Wei Liu @ 2017-10-12 14:46 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Julien Grall, Wei Liu, Jan Beulich, Xen-devel

On Thu, Oct 12, 2017 at 02:54:20PM +0100, Andrew Cooper wrote:
> This reverts commits f3b95fd07fdb55b1db091fede1b9a7c71f1eaa1b and
> 1bd39738a5a34f529a610fb275cc83ee5ac7547a.
> 
> The following patches (post-XSA-243 fixes) require init_guest_l4_table()
> to be common code.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Acked-by: Wei Liu <wei.liu2@citrix.com>


* Re: [PATCH 1/3] Revert "x86/mm: move PV l4 table setup code" and "x86/mm: factor out pv_arch_init_memory"
  2017-10-12 13:54 ` [PATCH 1/3] Revert "x86/mm: move PV l4 table setup code" and "x86/mm: factor out pv_arch_init_memory" Andrew Cooper
  2017-10-12 14:46   ` Wei Liu
@ 2017-10-12 14:55   ` Jan Beulich
  1 sibling, 0 replies; 16+ messages in thread
From: Jan Beulich @ 2017-10-12 14:55 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Julien Grall, Wei Liu, Xen-devel

>>> On 12.10.17 at 15:54, <andrew.cooper3@citrix.com> wrote:
> This reverts commits f3b95fd07fdb55b1db091fede1b9a7c71f1eaa1b and
> 1bd39738a5a34f529a610fb275cc83ee5ac7547a.
> 
> The following patches (post-XSA-243 fixes) require init_guest_l4_table()
> to be common code.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Acked-by: Jan Beulich <jbeulich@suse.com>




* Re: [PATCH 2/3] x86/mm: Consolidate all Xen L2 slot writing into init_xen_pae_l2_slots()
  2017-10-12 13:54 ` [PATCH 2/3] x86/mm: Consolidate all Xen L2 slot writing into init_xen_pae_l2_slots() Andrew Cooper
@ 2017-10-12 14:57   ` Jan Beulich
  2017-10-12 14:59   ` Wei Liu
  2017-10-18 11:38   ` George Dunlap
  2 siblings, 0 replies; 16+ messages in thread
From: Jan Beulich @ 2017-10-12 14:57 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: George Dunlap, Tim Deegan, Julien Grall, Wei Liu, Xen-devel

>>> On 12.10.17 at 15:54, <andrew.cooper3@citrix.com> wrote:
> Having all of this logic together makes it easier to follow Xen's virtual
> setup across the whole system.
> 
> No functional change.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>




* Re: [PATCH 2/3] x86/mm: Consolidate all Xen L2 slot writing into init_xen_pae_l2_slots()
  2017-10-12 13:54 ` [PATCH 2/3] x86/mm: Consolidate all Xen L2 slot writing into init_xen_pae_l2_slots() Andrew Cooper
  2017-10-12 14:57   ` Jan Beulich
@ 2017-10-12 14:59   ` Wei Liu
  2017-10-18 11:38   ` George Dunlap
  2 siblings, 0 replies; 16+ messages in thread
From: Wei Liu @ 2017-10-12 14:59 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: Wei Liu, George Dunlap, Tim Deegan, Xen-devel, Julien Grall, Jan Beulich

On Thu, Oct 12, 2017 at 02:54:21PM +0100, Andrew Cooper wrote:
> Having all of this logic together makes it easier to follow Xen's virtual
> setup across the whole system.
> 
> No functional change.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Wei Liu <wei.liu2@citrix.com>


* Re: [PATCH 3/3] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()
  2017-10-12 13:54 ` [PATCH 3/3] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots() Andrew Cooper
@ 2017-10-12 15:08   ` Jan Beulich
  2017-10-12 15:24     ` Andrew Cooper
  2017-10-12 15:13   ` Wei Liu
  2017-10-18 11:35   ` George Dunlap
  2 siblings, 1 reply; 16+ messages in thread
From: Jan Beulich @ 2017-10-12 15:08 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: George Dunlap, Tim Deegan, Julien Grall, Wei Liu, Xen-devel

>>> On 12.10.17 at 15:54, <andrew.cooper3@citrix.com> wrote:
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -391,41 +391,24 @@ int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted)
>      return 0;
>  }
>  
> -static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
> -{
> -    struct domain *d = v->domain;
> -    l4_pgentry_t *l4e;
> -
> -    l4e = map_domain_page(l4mfn);
> -
> -    /* Copy the common Xen mappings from the idle domain */
> -    memcpy(&l4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
> -           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
> -           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
> -
> -    /* Install the per-domain mappings for this domain */
> -    l4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
> -        l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
> -
> -    /* Install a linear mapping */
> -    l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
> -        l4e_from_mfn(l4mfn, __PAGE_HYPERVISOR_RW);
> -
> -    unmap_domain_page(l4e);
> -}
> -
>  static mfn_t hap_make_monitor_table(struct vcpu *v)
>  {
>      struct domain *d = v->domain;
>      struct page_info *pg;
> +    l4_pgentry_t *l4e;
>      mfn_t m4mfn;
>  
>      ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
>  
>      if ( (pg = hap_alloc(d)) == NULL )
>          goto oom;
> +
>      m4mfn = page_to_mfn(pg);
> -    hap_install_xen_entries_in_l4(v, m4mfn);
> +    l4e = __map_domain_page(pg);

If you obtain the MFN anyway, map_domain_page() is cheaper in terms of
generated code.
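
A sketch of the difference, assuming __map_domain_page(pg) is simply a
wrapper macro around map_domain_page(page_to_mfn(pg)):

    m4mfn = page_to_mfn(pg);
    l4e = __map_domain_page(pg);   /* recomputes page_to_mfn(pg) */

versus

    m4mfn = page_to_mfn(pg);
    l4e = map_domain_page(m4mfn);  /* reuses the already-computed mfn */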

> --- a/xen/arch/x86/pv/domain.c
> +++ b/xen/arch/x86/pv/domain.c
> @@ -35,7 +35,7 @@ static int setup_compat_l4(struct vcpu *v)
>  
>      l4tab = __map_domain_page(pg);
>      clear_page(l4tab);
> -    init_guest_l4_table(l4tab, v->domain, 1);
> +    init_xen_l4_slots(l4tab, page_to_mfn(pg), v->domain, INVALID_MFN, false);

Perhaps worth avoiding the double translation here too.

In any event
Reviewed-by: Jan Beulich <jbeulich@suse.com>

Jan



* Re: [PATCH 3/3] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()
  2017-10-12 13:54 ` [PATCH 3/3] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots() Andrew Cooper
  2017-10-12 15:08   ` Jan Beulich
@ 2017-10-12 15:13   ` Wei Liu
  2017-10-18 11:35   ` George Dunlap
  2 siblings, 0 replies; 16+ messages in thread
From: Wei Liu @ 2017-10-12 15:13 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: Wei Liu, George Dunlap, Tim Deegan, Xen-devel, Julien Grall, Jan Beulich

On Thu, Oct 12, 2017 at 02:54:22PM +0100, Andrew Cooper wrote:
> There are currently three functions which write L4 pagetables for Xen, but
> they all behave subtly differently.  sh_install_xen_entries_in_l4() in
> particular is catering for two different usecases, which makes the safety of
> the linear mappings hard to follow.
> 
> By consolidating the L4 pagetable writing in a single function, the resulting
> setup of Xen's virtual layout is easier to understand.
> 
> No practical changes to the resulting L4, although the logic has been
> rearranged to avoid rewriting some slots.  This changes the zap_ro_mpt
> parameter to simply ro_mpt.
> 
> Both {hap,sh}_install_xen_entries_in_l4() get folded into their callers.  The
> hap side has only a single caller, while the shadow side has two.  The shadow
> split helps highlight the correctness of the linear slots.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Wei Liu <wei.liu2@citrix.com>


* Re: [PATCH 3/3] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()
  2017-10-12 15:08   ` Jan Beulich
@ 2017-10-12 15:24     ` Andrew Cooper
  2017-10-12 15:41       ` Jan Beulich
  0 siblings, 1 reply; 16+ messages in thread
From: Andrew Cooper @ 2017-10-12 15:24 UTC (permalink / raw)
  To: Jan Beulich; +Cc: George Dunlap, Wei Liu, Julien Grall, Tim Deegan, Xen-devel

On 12/10/17 16:08, Jan Beulich wrote:
>>>> On 12.10.17 at 15:54, <andrew.cooper3@citrix.com> wrote:
>> --- a/xen/arch/x86/mm/hap/hap.c
>> +++ b/xen/arch/x86/mm/hap/hap.c
>> @@ -391,41 +391,24 @@ int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted)
>>      return 0;
>>  }
>>  
>> -static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
>> -{
>> -    struct domain *d = v->domain;
>> -    l4_pgentry_t *l4e;
>> -
>> -    l4e = map_domain_page(l4mfn);
>> -
>> -    /* Copy the common Xen mappings from the idle domain */
>> -    memcpy(&l4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
>> -           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
>> -           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
>> -
>> -    /* Install the per-domain mappings for this domain */
>> -    l4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
>> -        l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
>> -
>> -    /* Install a linear mapping */
>> -    l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
>> -        l4e_from_mfn(l4mfn, __PAGE_HYPERVISOR_RW);
>> -
>> -    unmap_domain_page(l4e);
>> -}
>> -
>>  static mfn_t hap_make_monitor_table(struct vcpu *v)
>>  {
>>      struct domain *d = v->domain;
>>      struct page_info *pg;
>> +    l4_pgentry_t *l4e;
>>      mfn_t m4mfn;
>>  
>>      ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
>>  
>>      if ( (pg = hap_alloc(d)) == NULL )
>>          goto oom;
>> +
>>      m4mfn = page_to_mfn(pg);
>> -    hap_install_xen_entries_in_l4(v, m4mfn);
>> +    l4e = __map_domain_page(pg);
> If you obtain the MFN anyway, map_domain_page() is cheaper in terms of
> generated code.

Ah yes.  Given that __map_domain_page() is a define, I'd hope the
compiler can spot and optimise away the double page_to_mfn().

Either way, fixed.

>
>> --- a/xen/arch/x86/pv/domain.c
>> +++ b/xen/arch/x86/pv/domain.c
>> @@ -35,7 +35,7 @@ static int setup_compat_l4(struct vcpu *v)
>>  
>>      l4tab = __map_domain_page(pg);
>>      clear_page(l4tab);
>> -    init_guest_l4_table(l4tab, v->domain, 1);
>> +    init_xen_l4_slots(l4tab, page_to_mfn(pg), v->domain, INVALID_MFN, false);
> Perhaps worth avoiding the double translation here too.
>
> In any event
> Reviewed-by: Jan Beulich <jbeulich@suse.com>

Just to confirm, the additional delta is:

andrewcoop@andrewcoop:/local/xen.git/xen$ git diff
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 1e7e8d0..41deb90 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -404,7 +404,7 @@ static mfn_t hap_make_monitor_table(struct vcpu *v)
         goto oom;
 
     m4mfn = page_to_mfn(pg);
-    l4e = __map_domain_page(pg);
+    l4e = map_domain_page(m4mfn);
 
     init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);
     unmap_domain_page(l4e);
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index a242037..2fb1996 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -28,14 +28,16 @@ static int setup_compat_l4(struct vcpu *v)
 {
     struct page_info *pg;
     l4_pgentry_t *l4tab;
+    mfn_t mfn;
 
     pg = alloc_domheap_page(v->domain, MEMF_no_owner);
     if ( pg == NULL )
         return -ENOMEM;
 
-    l4tab = __map_domain_page(pg);
+    mfn = page_to_mfn(pg);
+    l4tab = map_domain_page(mfn);
     clear_page(l4tab);
-    init_xen_l4_slots(l4tab, page_to_mfn(pg), v->domain, INVALID_MFN, false);
+    init_xen_l4_slots(l4tab, mfn, v->domain, INVALID_MFN, false);
     unmap_domain_page(l4tab);
 
     /* This page needs to look like a pagetable so that it can be shadowed */



* Re: [PATCH 3/3] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()
  2017-10-12 15:24     ` Andrew Cooper
@ 2017-10-12 15:41       ` Jan Beulich
  0 siblings, 0 replies; 16+ messages in thread
From: Jan Beulich @ 2017-10-12 15:41 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: George Dunlap, Tim Deegan, Julien Grall, Wei Liu, Xen-devel

>>> On 12.10.17 at 17:24, <andrew.cooper3@citrix.com> wrote:
> On 12/10/17 16:08, Jan Beulich wrote:
>>>>> On 12.10.17 at 15:54, <andrew.cooper3@citrix.com> wrote:
>>> --- a/xen/arch/x86/mm/hap/hap.c
>>> +++ b/xen/arch/x86/mm/hap/hap.c
>>> @@ -391,41 +391,24 @@ int hap_set_allocation(struct domain *d, unsigned int 
> pages, bool *preempted)
>>>      return 0;
>>>  }
>>>  
>>> -static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
>>> -{
>>> -    struct domain *d = v->domain;
>>> -    l4_pgentry_t *l4e;
>>> -
>>> -    l4e = map_domain_page(l4mfn);
>>> -
>>> -    /* Copy the common Xen mappings from the idle domain */
>>> -    memcpy(&l4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
>>> -           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
>>> -           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
>>> -
>>> -    /* Install the per-domain mappings for this domain */
>>> -    l4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
>>> -        l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
>>> -
>>> -    /* Install a linear mapping */
>>> -    l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
>>> -        l4e_from_mfn(l4mfn, __PAGE_HYPERVISOR_RW);
>>> -
>>> -    unmap_domain_page(l4e);
>>> -}
>>> -
>>>  static mfn_t hap_make_monitor_table(struct vcpu *v)
>>>  {
>>>      struct domain *d = v->domain;
>>>      struct page_info *pg;
>>> +    l4_pgentry_t *l4e;
>>>      mfn_t m4mfn;
>>>  
>>>      ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
>>>  
>>>      if ( (pg = hap_alloc(d)) == NULL )
>>>          goto oom;
>>> +
>>>      m4mfn = page_to_mfn(pg);
>>> -    hap_install_xen_entries_in_l4(v, m4mfn);
>>> +    l4e = __map_domain_page(pg);
>> If you obtain the MFN anyway, map_domain_page() is cheaper in terms of
>> generated code.
> 
> Ah yes.  Given that __map_domain_page() is a define, I'd hope the
> compiler can spot and optimise away the double page_to_mfn().

Considering this is a non-trivial operation, I'm afraid many (if not
all) compiler versions won't be smart enough.

>>> --- a/xen/arch/x86/pv/domain.c
>>> +++ b/xen/arch/x86/pv/domain.c
>>> @@ -35,7 +35,7 @@ static int setup_compat_l4(struct vcpu *v)
>>>  
>>>      l4tab = __map_domain_page(pg);
>>>      clear_page(l4tab);
>>> -    init_guest_l4_table(l4tab, v->domain, 1);
>>> +    init_xen_l4_slots(l4tab, page_to_mfn(pg), v->domain, INVALID_MFN, 
> false);
>> Perhaps worth avoiding the double translation here too.
>>
>> In any event
>> Reviewed-by: Jan Beulich <jbeulich@suse.com>
> 
> Just to confirm, the additional delta is:

Yes, looks fine, thanks.

Jan



* Re: [PATCH for 4.10 0/3] XSA-243 followup
  2017-10-12 13:54 [PATCH for 4.10 0/3] XSA-243 followup Andrew Cooper
                   ` (2 preceding siblings ...)
  2017-10-12 13:54 ` [PATCH 3/3] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots() Andrew Cooper
@ 2017-10-13  9:47 ` Julien Grall
  2017-10-14 17:45 ` Tim Deegan
  4 siblings, 0 replies; 16+ messages in thread
From: Julien Grall @ 2017-10-13  9:47 UTC (permalink / raw)
  To: Andrew Cooper, Xen-devel
  Cc: George Dunlap, Tim Deegan, Julien Grall, Wei Liu, Jan Beulich

Hi Andrew,

On 12/10/17 14:54, Andrew Cooper wrote:
> The important change here is in patch 3, which is intended to remove the
> correct-but-dangerous-looking construction of linear pagetable slots for
> shadowed guests.

Release-acked-by: Julien Grall <julien.grall@linaro.org>

Cheers,

> 
> Andrew Cooper (3):
>    Revert "x86/mm: move PV l4 table setup code" and "x86/mm: factor out
>      pv_arch_init_memory"
>    x86/mm: Consolidate all Xen L2 slot writing into
>      init_xen_pae_l2_slots()
>    x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()
> 
>   xen/arch/x86/mm.c              | 144 ++++++++++++++++++++++++++++++++++++---
>   xen/arch/x86/mm/hap/hap.c      |  31 ++-------
>   xen/arch/x86/mm/shadow/multi.c | 148 +++++++++++------------------------------
>   xen/arch/x86/pv/dom0_build.c   |   5 +-
>   xen/arch/x86/pv/domain.c       |   7 +-
>   xen/arch/x86/pv/mm.c           |  82 -----------------------
>   xen/arch/x86/pv/mm.h           |   3 -
>   xen/include/asm-x86/mm.h       |   3 +
>   xen/include/asm-x86/pv/mm.h    |   4 --
>   9 files changed, 187 insertions(+), 240 deletions(-)
> 

-- 
Julien Grall


* Re: [PATCH for 4.10 0/3] XSA-243 followup
  2017-10-12 13:54 [PATCH for 4.10 0/3] XSA-243 followup Andrew Cooper
                   ` (3 preceding siblings ...)
  2017-10-13  9:47 ` [PATCH for 4.10 0/3] XSA-243 followup Julien Grall
@ 2017-10-14 17:45 ` Tim Deegan
  4 siblings, 0 replies; 16+ messages in thread
From: Tim Deegan @ 2017-10-14 17:45 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: George Dunlap, Julien Grall, Wei Liu, Jan Beulich, Xen-devel

At 14:54 +0100 on 12 Oct (1507820059), Andrew Cooper wrote:
> The important change here is in patch 3, which is intended to remove the
> correct-but-dangerous-looking construction of linear pagetable slots for
> shadowed guests.

> Andrew Cooper (3):
>   Revert "x86/mm: move PV l4 table setup code" and "x86/mm: factor out
>     pv_arch_init_memory"
>   x86/mm: Consolidate all Xen L2 slot writing into
>     init_xen_pae_l2_slots()
>   x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()

Acked-by: Tim Deegan <tim@xen.org>


* Re: [PATCH 3/3] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()
  2017-10-12 13:54 ` [PATCH 3/3] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots() Andrew Cooper
  2017-10-12 15:08   ` Jan Beulich
  2017-10-12 15:13   ` Wei Liu
@ 2017-10-18 11:35   ` George Dunlap
  2 siblings, 0 replies; 16+ messages in thread
From: George Dunlap @ 2017-10-18 11:35 UTC (permalink / raw)
  To: Andrew Cooper, Xen-devel
  Cc: George Dunlap, Wei Liu, Julien Grall, Tim Deegan, Jan Beulich

On 10/12/2017 02:54 PM, Andrew Cooper wrote:
> There are currently three functions which write L4 pagetables for Xen, but
> they all behave subtly differently.  sh_install_xen_entries_in_l4() in
> particular is catering for two different usecases, which makes the safety of
> the linear mappings hard to follow.
> 
> By consolidating the L4 pagetable writing in a single function, the resulting
> setup of Xen's virtual layout is easier to understand.
> 
> No practical changes to the resulting L4, although the logic has been
> rearranged to avoid rewriting some slots.  This changes the zap_ro_mpt
> parameter to simply ro_mpt.
> 
> Both {hap,sh}_install_xen_entries_in_l4() get folded into their callers.  The
> hap side has only a single caller, while the shadow side has two.  The shadow
> split helps highlight the correctness of the linear slots.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Acked-by: George Dunlap <george.dunlap@citrix.com>


* Re: [PATCH 2/3] x86/mm: Consolidate all Xen L2 slot writing into init_xen_pae_l2_slots()
  2017-10-12 13:54 ` [PATCH 2/3] x86/mm: Consolidate all Xen L2 slot writing into init_xen_pae_l2_slots() Andrew Cooper
  2017-10-12 14:57   ` Jan Beulich
  2017-10-12 14:59   ` Wei Liu
@ 2017-10-18 11:38   ` George Dunlap
  2 siblings, 0 replies; 16+ messages in thread
From: George Dunlap @ 2017-10-18 11:38 UTC (permalink / raw)
  To: Andrew Cooper, Xen-devel
  Cc: George Dunlap, Wei Liu, Julien Grall, Tim Deegan, Jan Beulich

On 10/12/2017 02:54 PM, Andrew Cooper wrote:
> Having all of this logic together makes it easier to follow Xen's virtual
> setup across the whole system.
> 
> No functional change.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: George Dunlap <george.dunlap@citrix.com>

> ---
> CC: Jan Beulich <JBeulich@suse.com>
> CC: Tim Deegan <tim@xen.org>
> CC: George Dunlap <george.dunlap@eu.citrix.com>
> CC: Wei Liu <wei.liu2@citrix.com>
> CC: Julien Grall <julien.grall@arm.com>
> ---
>  xen/arch/x86/mm.c              | 16 +++++++++-------
>  xen/arch/x86/mm/shadow/multi.c | 42 +++++++++++++++---------------------------
>  xen/include/asm-x86/mm.h       |  1 +
>  3 files changed, 25 insertions(+), 34 deletions(-)
> 
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index f90a42a..ea4af16 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -1433,13 +1433,7 @@ static int alloc_l2_table(struct page_info *page, unsigned long type,
>      }
>  
>      if ( rc >= 0 && (type & PGT_pae_xen_l2) )
> -    {
> -        /* Xen private mappings. */
> -        memcpy(&pl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
> -               &compat_idle_pg_table_l2[
> -                   l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
> -               COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*pl2e));
> -    }
> +        init_xen_pae_l2_slots(pl2e, d);
>  
>      unmap_domain_page(pl2e);
>      return rc > 0 ? 0 : rc;
> @@ -1518,6 +1512,14 @@ static int alloc_l3_table(struct page_info *page)
>      return rc > 0 ? 0 : rc;
>  }
>  
> +void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d)
> +{
> +    memcpy(&l2t[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
> +           &compat_idle_pg_table_l2[
> +               l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
> +           COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*l2t));
> +}
> +
>  /*
>   * This function must write all ROOT_PAGETABLE_PV_XEN_SLOTS, to clobber any
>   * values a guest may have left there from alloc_l4_table().
> diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
> index d540af1..1b76e0c 100644
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -1521,31 +1521,6 @@ void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
>  }
>  #endif
>  
> -#if GUEST_PAGING_LEVELS >= 3
> -// For 3-on-3 PV guests, we need to make sure the xen mappings are in
> -// place, which means that we need to populate the l2h entry in the l3
> -// table.
> -
> -static void sh_install_xen_entries_in_l2h(struct domain *d, mfn_t sl2hmfn)
> -{
> -    shadow_l2e_t *sl2e;
> -
> -    if ( !is_pv_32bit_domain(d) )
> -        return;
> -
> -    sl2e = map_domain_page(sl2hmfn);
> -    BUILD_BUG_ON(sizeof (l2_pgentry_t) != sizeof (shadow_l2e_t));
> -
> -    /* Copy the common Xen mappings from the idle domain */
> -    memcpy(
> -        &sl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
> -        &compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
> -        COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*sl2e));
> -
> -    unmap_domain_page(sl2e);
> -}
> -#endif
> -
>  
>  /**************************************************************************/
>  /* Create a shadow of a given guest page.
> @@ -1610,7 +1585,14 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
>  #endif
>  #if GUEST_PAGING_LEVELS >= 3
>          case SH_type_l2h_shadow:
> -            sh_install_xen_entries_in_l2h(v->domain, smfn);
> +            BUILD_BUG_ON(sizeof(l2_pgentry_t) != sizeof(shadow_l2e_t));
> +            if ( is_pv_32bit_domain(d) )
> +            {
> +                shadow_l2e_t *l2t = map_domain_page(smfn);
> +
> +                init_xen_pae_l2_slots(l2t, d);
> +                unmap_domain_page(l2t);
> +            }
>              break;
>  #endif
>          default: /* Do nothing */ break;
> @@ -1677,6 +1659,8 @@ sh_make_monitor_table(struct vcpu *v)
>  
>              if ( is_pv_32bit_domain(d) )
>              {
> +                l2_pgentry_t *l2t;
> +
>                  /* For 32-bit PV guests, we need to map the 32-bit Xen
>                   * area into its usual VAs in the monitor tables */
>                  m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
> @@ -1687,7 +1671,11 @@ sh_make_monitor_table(struct vcpu *v)
>                  mfn_to_page(m2mfn)->shadow_flags = 2;
>                  l3e = map_domain_page(m3mfn);
>                  l3e[3] = l3e_from_mfn(m2mfn, _PAGE_PRESENT);
> -                sh_install_xen_entries_in_l2h(d, m2mfn);
> +
> +                l2t = map_domain_page(m2mfn);
> +                init_xen_pae_l2_slots(l2t, d);
> +                unmap_domain_page(l2t);
> +
>                  unmap_domain_page(l3e);
>              }
>  
> diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
> index eeac4d7..da3c5e2 100644
> --- a/xen/include/asm-x86/mm.h
> +++ b/xen/include/asm-x86/mm.h
> @@ -340,6 +340,7 @@ static inline void *__page_to_virt(const struct page_info *pg)
>  int free_page_type(struct page_info *page, unsigned long type,
>                     int preemptible);
>  
> +void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d);
>  void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
>                           bool_t zap_ro_mpt);
>  bool fill_ro_mpt(mfn_t mfn);
> 


