* [PATCH] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()
@ 2017-09-01 17:07 Andrew Cooper
From: Andrew Cooper @ 2017-09-01 17:07 UTC (permalink / raw)
  To: Xen-devel; +Cc: George Dunlap, Andrew Cooper, Tim Deegan, Wei Liu, Jan Beulich

Having all of this logic together makes it easier to follow Xen's virtual
setup across the whole system.

No practical changes to the resulting L4, although the logic has been
rearranged to avoid rewriting some slots.  This changes the zap_ro_mpt
parameter to simply ro_mpt.  Another side effect is that highmem-start= is
applied consistently to all L4 tables, not just PV ones.
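
For reference, the PV L4 caller changes like this (a sketch distilled from
the alloc_l4_table() hunk below, not extra patch content):

    /* Before: the third argument requested zapping the RO M2P slot. */
    init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));

    /* After: the final argument requests filling the RO M2P slot. */
    init_xen_l4_slots(pl4e, _mfn(pfn),
                      d, INVALID_MFN, VM_ASSIST(d, m2p_strict));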

hap_install_xen_entries_in_l4() gets folded into its sole caller.
sh_install_xen_entries_in_l4() however stays (as it has multiple callers), and
keeps its preexisting safety checks.
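
Besides the PV case sketched above, the remaining call sites look as follows
(a sketch with arguments copied from the hunks below; the shadow predicate is
abbreviated):

    /* HAP monitor table: */
    init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);

    /* Shadow L4s: */
    init_xen_l4_slots(sl4e, gl4mfn, d, sl4mfn, /* m2p-strict check */ ...);

    /* PV dom0 construction: */
    init_xen_l4_slots(l4tab, _mfn(__pa(l4start) >> PAGE_SHIFT),
                      d, INVALID_MFN, true);

    /* Compat (32bit PV) L4s: */
    init_xen_l4_slots(l4tab, page_to_mfn(pg), v->domain, INVALID_MFN, false);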

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: George Dunlap <george.dunlap@eu.citrix.com>
CC: Tim Deegan <tim@xen.org>
CC: Wei Liu <wei.liu2@citrix.com>
---
 xen/arch/x86/mm.c              | 84 ++++++++++++++++++++++++++++++++----------
 xen/arch/x86/mm/hap/hap.c      | 31 ++++------------
 xen/arch/x86/mm/shadow/multi.c | 39 +++++---------------
 xen/arch/x86/pv/dom0_build.c   |  3 +-
 xen/arch/x86/pv/domain.c       |  2 +-
 xen/include/asm-x86/mm.h       |  4 +-
 6 files changed, 86 insertions(+), 77 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index f4b9747..557b5a5 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1614,37 +1614,82 @@ static int alloc_l3_table(struct page_info *page)
 }
 
 /*
+ * Fill an L4 with Xen entries.
+ *
  * This function must write all ROOT_PAGETABLE_PV_XEN_SLOTS, to clobber any
  * values a guest may have left there from alloc_l4_table().
+ *
+ * l4t and l4mfn are mandatory, but l4mfn doesn't need to be the mfn under
+ * *l4t.  All other parameters are optional and will either fill or zero the
+ * appropriate slots.  Pagetables not shared with guests will gain the
+ * extended directmap.
  */
-void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
-                         bool zap_ro_mpt)
-{
-    /* Xen private mappings. */
-    memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           root_pgt_pv_xen_slots * sizeof(l4_pgentry_t));
+void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
+                       const struct domain *d, mfn_t sl4mfn, bool ro_mpt)
+{
+    /* Slot 256: RO M2P (if applicable). */
+    l4t[l4_table_offset(RO_MPT_VIRT_START)] =
+        ro_mpt ? idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]
+               : l4e_empty();
+
+    /* Slot 257: PCI MMCFG. */
+    l4t[l4_table_offset(PCI_MCFG_VIRT_START)] =
+        idle_pg_table[l4_table_offset(PCI_MCFG_VIRT_START)];
+
+    /* Slot 258: Self linear mappings. */
+    ASSERT(!mfn_eq(l4mfn, INVALID_MFN));
+    l4t[l4_table_offset(LINEAR_PT_VIRT_START)] =
+        l4e_from_mfn(l4mfn, __PAGE_HYPERVISOR_RW);
+
+    /* Slot 259: Shadow linear mappings (if applicable). */
+    l4t[l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
+        mfn_eq(sl4mfn, INVALID_MFN) ? l4e_empty() :
+        l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR_RW);
+
+    /* Slot 260: Per-domain mappings (if applicable). */
+    l4t[l4_table_offset(PERDOMAIN_VIRT_START)] =
+        d ? l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW)
+          : l4e_empty();
+
+    /* Slot 261-: text/data/bss, RW M2P, vmap, frametable, directmap. */
 #ifndef NDEBUG
     if ( unlikely(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS) )
     {
-        l4_pgentry_t *next = &l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT +
-                                    root_pgt_pv_xen_slots];
+        /*
+         * If using highmem-start=, artificially shorten the directmap to
+         * simulate very large machines.
+         */
+        l4_pgentry_t *next;
+
+        memcpy(&l4t[l4_table_offset(XEN_VIRT_START)],
+               &idle_pg_table[l4_table_offset(XEN_VIRT_START)],
+               (ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots -
+                l4_table_offset(XEN_VIRT_START)) * sizeof(*l4t));
+
+        next = &l4t[ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots];
 
         if ( l4e_get_intpte(split_l4e) )
             *next++ = split_l4e;
 
         memset(next, 0,
-               _p(&l4tab[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
+               _p(&l4t[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
     }
-#else
-    BUILD_BUG_ON(root_pgt_pv_xen_slots != ROOT_PAGETABLE_PV_XEN_SLOTS);
+    else
 #endif
-    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
-    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
-    if ( zap_ro_mpt || is_pv_32bit_domain(d) )
-        l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
+    {
+        /*
+         * For PV guests, provide the shortened directmap, up to PV_XEN_SLOTS.
+         * For HVM guests and the idle vcpus, provide the extended directmap.
+         */
+        unsigned int slots = ((d && !paging_mode_external(d))
+                              ? ROOT_PAGETABLE_PV_XEN_SLOTS
+                              : ROOT_PAGETABLE_XEN_SLOTS);
+
+        memcpy(&l4t[l4_table_offset(XEN_VIRT_START)],
+               &idle_pg_table[l4_table_offset(XEN_VIRT_START)],
+               (ROOT_PAGETABLE_FIRST_XEN_SLOT + slots -
+                l4_table_offset(XEN_VIRT_START)) * sizeof(*l4t));
+    }
 }
 
 bool fill_ro_mpt(mfn_t mfn)
@@ -1721,7 +1766,8 @@ static int alloc_l4_table(struct page_info *page)
 
     if ( rc >= 0 )
     {
-        init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
+        init_xen_l4_slots(pl4e, _mfn(pfn),
+                          d, INVALID_MFN, VM_ASSIST(d, m2p_strict));
         atomic_inc(&d->arch.pv_domain.nr_l4_pages);
         rc = 0;
     }
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 15e4877..ac26a2a 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -391,41 +391,24 @@ int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted)
     return 0;
 }
 
-static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
-{
-    struct domain *d = v->domain;
-    l4_pgentry_t *l4e;
-
-    l4e = map_domain_page(l4mfn);
-
-    /* Copy the common Xen mappings from the idle domain */
-    memcpy(&l4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
-
-    /* Install the per-domain mappings for this domain */
-    l4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
-
-    /* Install a linear mapping */
-    l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        l4e_from_mfn(l4mfn, __PAGE_HYPERVISOR_RW);
-
-    unmap_domain_page(l4e);
-}
-
 static mfn_t hap_make_monitor_table(struct vcpu *v)
 {
     struct domain *d = v->domain;
     struct page_info *pg;
+    l4_pgentry_t *l4e;
     mfn_t m4mfn;
 
     ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
 
     if ( (pg = hap_alloc(d)) == NULL )
         goto oom;
+
     m4mfn = page_to_mfn(pg);
-    hap_install_xen_entries_in_l4(v, m4mfn);
+    l4e = __map_domain_page(pg);
+
+    init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);
+    unmap_domain_page(l4e);
+
     return m4mfn;
 
  oom:
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 7ab8f29..40baa7f 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1460,38 +1460,10 @@ do {                                                                    \
 void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
 {
     shadow_l4e_t *sl4e;
-    unsigned int slots;
 
     sl4e = map_domain_page(sl4mfn);
     BUILD_BUG_ON(sizeof (l4_pgentry_t) != sizeof (shadow_l4e_t));
 
-    /* Copy the common Xen mappings from the idle domain */
-    slots = (shadow_mode_external(d)
-             ? ROOT_PAGETABLE_XEN_SLOTS
-             : ROOT_PAGETABLE_PV_XEN_SLOTS);
-    memcpy(&sl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           slots * sizeof(l4_pgentry_t));
-
-    /* Install the per-domain mappings for this domain */
-    sl4e[shadow_l4_table_offset(PERDOMAIN_VIRT_START)] =
-        shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
-                            __PAGE_HYPERVISOR_RW);
-
-    if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) &&
-         !VM_ASSIST(d, m2p_strict) )
-    {
-        /* open coded zap_ro_mpt(mfn_x(sl4mfn)): */
-        sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] = shadow_l4e_empty();
-    }
-
-    /* Shadow linear mapping for 4-level shadows.  N.B. for 3-level
-     * shadows on 64-bit xen, this linear mapping is later replaced by the
-     * monitor pagetable structure, which is built in make_monitor_table
-     * and maintained by sh_update_linear_entries. */
-    sl4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
-        shadow_l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR_RW);
-
     /*
      * It is not safe to create guest linear mappings into a translated
      * domain.  For translated domains, this function is used once to create a
@@ -1503,8 +1475,15 @@ void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
     else
         ASSERT(!mfn_eq(gl4mfn, sl4mfn));
 
-    sl4e[shadow_l4_table_offset(LINEAR_PT_VIRT_START)] =
-        shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR_RW);
+    /*
+     * Shadow linear mapping for 4-level shadows.  N.B. for 3-level
+     * shadows on 64-bit xen, this linear mapping is later replaced by the
+     * monitor pagetable structure, which is built in make_monitor_table
+     * and maintained by sh_update_linear_entries.
+     */
+    init_xen_l4_slots(sl4e, gl4mfn, d, sl4mfn,
+                      !shadow_mode_external(d) && !is_pv_32bit_domain(d) &&
+                      VM_ASSIST(d, m2p_strict));
 
     unmap_domain_page(sl4e);
 }
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index ec7f96d..6541c76 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -588,7 +588,8 @@ int __init dom0_construct_pv(struct domain *d,
         l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
     }
     clear_page(l4tab);
-    init_guest_l4_table(l4tab, d, 0);
+    init_xen_l4_slots(l4tab, _mfn(__pa(l4start) >> PAGE_SHIFT),
+                      d, INVALID_MFN, true);
     v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
     if ( is_pv_32bit_domain(d) )
         v->arch.guest_table_user = v->arch.guest_table;
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index c8b9cb6..a242037 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -35,7 +35,7 @@ static int setup_compat_l4(struct vcpu *v)
 
     l4tab = __map_domain_page(pg);
     clear_page(l4tab);
-    init_guest_l4_table(l4tab, v->domain, 1);
+    init_xen_l4_slots(l4tab, page_to_mfn(pg), v->domain, INVALID_MFN, false);
     unmap_domain_page(l4tab);
 
     /* This page needs to look like a pagetable so that it can be shadowed */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 4c03a33..f218c0c 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -329,8 +329,8 @@ static inline void *__page_to_virt(const struct page_info *pg)
 int free_page_type(struct page_info *page, unsigned long type,
                    int preemptible);
 
-void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
-                         bool_t zap_ro_mpt);
+void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
+                       const struct domain *d, mfn_t sl4mfn, bool ro_mpt);
 bool fill_ro_mpt(mfn_t mfn);
 void zap_ro_mpt(mfn_t mfn);
 
-- 
2.1.4



* Re: [PATCH] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()
From: Tim Deegan @ 2017-09-03 12:01 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: George Dunlap, Wei Liu, Jan Beulich, Xen-devel

At 18:07 +0100 on 01 Sep (1504289261), Andrew Cooper wrote:
>      if ( unlikely(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS) )
>      {
> -        l4_pgentry_t *next = &l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT +
> -                                    root_pgt_pv_xen_slots];
> +        /*
> +         * If using highmem-start=, artificially shorten the directmap to
> +         * simulate very large machines.
> +         */
> +        l4_pgentry_t *next;
> +
> +        memcpy(&l4t[l4_table_offset(XEN_VIRT_START)],
> +               &idle_pg_table[l4_table_offset(XEN_VIRT_START)],
> +               (ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots -
> +                l4_table_offset(XEN_VIRT_START)) * sizeof(*l4t));
> +
> +        next = &l4t[ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots];
>  
>          if ( l4e_get_intpte(split_l4e) )
>              *next++ = split_l4e;
>  
>          memset(next, 0,
> -               _p(&l4tab[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
> +               _p(&l4t[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
>      }
> -#else
> -    BUILD_BUG_ON(root_pgt_pv_xen_slots != ROOT_PAGETABLE_PV_XEN_SLOTS);
> +    else
>  #endif
> -    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
> -        l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
> -    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
> -        l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
> -    if ( zap_ro_mpt || is_pv_32bit_domain(d) )
> -        l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
> +    {
> +        /*
> +         * For PV guests, provide the shortened directmap, up to PV_XEN_SLOTS.
> +         * For HVM guests and the idle vcpus, provide the extended directmap.
> +         */
> +        unsigned int slots = ((d && !paging_mode_external(d))
> +                              ? ROOT_PAGETABLE_PV_XEN_SLOTS
> +                              : ROOT_PAGETABLE_XEN_SLOTS);
> +
> +        memcpy(&l4t[l4_table_offset(XEN_VIRT_START)],
> +               &idle_pg_table[l4_table_offset(XEN_VIRT_START)],
> +               (ROOT_PAGETABLE_FIRST_XEN_SLOT + slots -
> +                l4_table_offset(XEN_VIRT_START)) * sizeof(*l4t));

Does this branch need a memset(0) for the PV case, to zap the higher
directmap slots?  

The shadow changes all look fine, so:
Acked-by: Tim Deegan <tim@xen.org>


* Re: [PATCH] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()
From: Andrew Cooper @ 2017-09-04  9:25 UTC (permalink / raw)
  To: Tim Deegan; +Cc: George Dunlap, Wei Liu, Jan Beulich, Xen-devel

On 03/09/17 13:01, Tim Deegan wrote:
> At 18:07 +0100 on 01 Sep (1504289261), Andrew Cooper wrote:
>>      if ( unlikely(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS) )
>>      {
>> -        l4_pgentry_t *next = &l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT +
>> -                                    root_pgt_pv_xen_slots];
>> +        /*
>> +         * If using highmem-start=, artificially shorten the directmap to
>> +         * simulate very large machines.
>> +         */
>> +        l4_pgentry_t *next;
>> +
>> +        memcpy(&l4t[l4_table_offset(XEN_VIRT_START)],
>> +               &idle_pg_table[l4_table_offset(XEN_VIRT_START)],
>> +               (ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots -
>> +                l4_table_offset(XEN_VIRT_START)) * sizeof(*l4t));
>> +
>> +        next = &l4t[ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots];
>>  
>>          if ( l4e_get_intpte(split_l4e) )
>>              *next++ = split_l4e;
>>  
>>          memset(next, 0,
>> -               _p(&l4tab[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
>> +               _p(&l4t[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
>>      }
>> -#else
>> -    BUILD_BUG_ON(root_pgt_pv_xen_slots != ROOT_PAGETABLE_PV_XEN_SLOTS);
>> +    else
>>  #endif
>> -    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
>> -        l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
>> -    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
>> -        l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
>> -    if ( zap_ro_mpt || is_pv_32bit_domain(d) )
>> -        l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
>> +    {
>> +        /*
>> +         * For PV guests, provide the shortened directmap, up to PV_XEN_SLOTS.
>> +         * For HVM guests and the idle vcpus, provide the extended directmap.
>> +         */
>> +        unsigned int slots = ((d && !paging_mode_external(d))
>> +                              ? ROOT_PAGETABLE_PV_XEN_SLOTS
>> +                              : ROOT_PAGETABLE_XEN_SLOTS);
>> +
>> +        memcpy(&l4t[l4_table_offset(XEN_VIRT_START)],
>> +               &idle_pg_table[l4_table_offset(XEN_VIRT_START)],
>> +               (ROOT_PAGETABLE_FIRST_XEN_SLOT + slots -
>> +                l4_table_offset(XEN_VIRT_START)) * sizeof(*l4t));
> Does this branch need a memset(0) for the PV case, to zap the higher
> directmap slots?  

init_xen_l4_slots() is called after all the guest entries have been
validated.  Zapping them here will get us into some reference counting
fun. :)

>
> The shadow changes all look fine, so:
> Acked-by: Tim Deegan <tim@xen.org>

Thanks,

~Andrew


* Re: [PATCH] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()
From: Jan Beulich @ 2017-09-04 10:09 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: George Dunlap, Tim Deegan, Wei Liu, Xen-devel

>>> On 01.09.17 at 19:07, <andrew.cooper3@citrix.com> wrote:
> Having all of this logic together makes it easier to follow Xen's virtual
> setup across the whole system.
> 
> No practical changes to the resulting L4, although the logic has been
> rearranged to avoid rewriting some slots.  This changes the zap_ro_mpt
> parameter to simply ro_mpt.  Another side effect is that highmem-start= is
> applied consistently to all L4 tables, not just PV ones.

Is this side effect really a good idea to have? I.e. are you certain
HVM-only code (i.e. such known to only ever run in HVM or idle
vCPU context) is all prepared to not have everything direct-
mapped, and is using map_domain_page() et al everywhere?

> +void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
> +                       const struct domain *d, mfn_t sl4mfn, bool ro_mpt)
> +{
> +    /* Slot 256: RO M2P (if applicable). */
> +    l4t[l4_table_offset(RO_MPT_VIRT_START)] =
> +        ro_mpt ? idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]
> +               : l4e_empty();

While the patch in general looks correct, I'm also not convinced
having the slot numbers here as well as doing the operation in an
open-coded slot-by-slot fashion is a good idea: The comments
and the intended ordering here can easily go stale with future
adjustments to the virtual address layout.

Jan



* Re: [PATCH] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()
From: Andrew Cooper @ 2017-09-04 10:42 UTC (permalink / raw)
  To: Jan Beulich; +Cc: George Dunlap, Tim Deegan, Wei Liu, Xen-devel

On 04/09/17 11:09, Jan Beulich wrote:
>>>> On 01.09.17 at 19:07, <andrew.cooper3@citrix.com> wrote:
>> Having all of this logic together makes it easier to follow Xen's virtual
>> setup across the whole system.
>>
>> No practical changes to the resulting L4, although the logic has been
>> rearranged to avoid rewriting some slots.  This changes the zap_ro_mpt
>> parameter to simply ro_mpt.  Another side effect is that highmem-start= is
>> applied consistently to all L4 tables, not just PV ones.
> Is this side effect really a good idea to have?

Yes.  Otherwise the value of highmem-start= as a debugging mechanism is
rather stunted.

> I.e. are you certain
> HVM-only code (i.e. such known to only ever run in HVM or idle
> vCPU context) is all prepared to not have everything direct-
> mapped, and is using map_domain_page() et al everywhere?

I'm not aware of any places which would violate this.  Then again, as
highmem-start= is too bitrotten to function, I can't test.

>
>> +void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
>> +                       const struct domain *d, mfn_t sl4mfn, bool ro_mpt)
>> +{
>> +    /* Slot 256: RO M2P (if applicable). */
>> +    l4t[l4_table_offset(RO_MPT_VIRT_START)] =
>> +        ro_mpt ? idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]
>> +               : l4e_empty();
> While the patch in general looks correct, I'm also not convinced
> having the slot numbers here as well as doing the operation in an
> open-coded slot-by-slot fashion is a good idea: The comments
> and the intended ordering here can easily go stale with future
> adjustments to the virtual address layout.

The point of having all of this in one place is that there is only one
place to change if the virtual address layout changes, so the chances of
it getting stale are reduced.

The slot-by-slot fashion is how the old function was implemented after
optimisation, except this version is shorter because we don't rewrite
several slots.

~Andrew


* Re: [PATCH] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()
From: Jan Beulich @ 2017-09-04 12:47 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: George Dunlap, Tim Deegan, Wei Liu, Xen-devel

>>> On 04.09.17 at 12:42, <andrew.cooper3@citrix.com> wrote:
> On 04/09/17 11:09, Jan Beulich wrote:
>>>>> On 01.09.17 at 19:07, <andrew.cooper3@citrix.com> wrote:
>>> Having all of this logic together makes it easier to follow Xen's virtual
>>> setup across the whole system.
>>>
>>> No practical changes to the resulting L4, although the logic has been
>>> rearranged to avoid rewriting some slots.  This changes the zap_ro_mpt
>>> parameter to simply ro_mpt.  Another side effect is that highmem-start= is
>>> applied consistently to all L4 tables, not just PV ones.
>> Is this side effect really a good idea to have?
> 
> Yes.  Otherwise the value of highmem-start= as a debugging mechanism is
> rather stunted.

Why? When it was introduced it was supposed to allow debugging
issues with the partial PV directmap vs the always-full HVM/idle one.

>> I.e. are you certain
>> HVM-only code (i.e. such known to only ever run in HVM or idle
>> vCPU context) is all prepared to not have everything direct-
>> mapped, and is using map_domain_page() et al everywhere?
> 
> I'm not aware of any places which would violate this.  Then again, as
> highmem-start= is too bitrotten to function, I can't test.

I'll see about looking into what's broken.

>>> +void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
>>> +                       const struct domain *d, mfn_t sl4mfn, bool ro_mpt)
>>> +{
>>> +    /* Slot 256: RO M2P (if applicable). */
>>> +    l4t[l4_table_offset(RO_MPT_VIRT_START)] =
>>> +        ro_mpt ? idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]
>>> +               : l4e_empty();
>> While the patch in general looks correct, I'm also not convinced
>> having the slot numbers here as well as doing the operation in an
>> open-coded slot-by-slot fashion is a good idea: The comments
>> and the intended ordering here can easily go stale with future
>> adjustments to the virtual address layout.
> 
> The point of having all of this in one place is that there is only one
> place to change if the virtual address layout changes, so the chances of
> it getting stale are reduced.
> 
> The slot-by-slot fashion is how the old function was implemented after
> optimisation, except this version is shorter because we don't rewrite
> several slots.

Well, I certainly did assume the compiler would make this slot-by-
slot either way, so my concern is solely with how the source looks.

Jan



* Re: [PATCH] x86/mm: Consolidate all Xen L4 slot writing into init_xen_l4_slots()
From: George Dunlap @ 2017-09-26 10:12 UTC (permalink / raw)
  To: Andrew Cooper, Xen-devel; +Cc: George Dunlap, Wei Liu, Tim Deegan, Jan Beulich

On 09/01/2017 06:07 PM, Andrew Cooper wrote:
> Having all of this logic together makes it easier to follow Xen's virtual
> setup across the whole system.
> 
> No practical changes to the resulting L4, although the logic has been
> rearranged to avoid rewriting some slots.  This changes the zap_ro_mpt
> parameter to simply ro_mpt.  Another side effect is that highmem-start= is
> applied consistently to all L4 tables, not just PV ones.
> 
> hap_install_xen_entries_in_l4() gets folded into its sole caller.
> sh_install_xen_entries_in_l4() however stays (as it has multiple callers), and
> keeps its preexisting safety checks.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Andy,

This doesn't apply anymore.  Would you still like me to try to review
it, or shall I wait for a resend?

 -George

