All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/5] x86: Misc cleanup and improvements
@ 2017-08-29 11:19 Andrew Cooper
  2017-08-29 11:19 ` [PATCH 1/5] x86/pv: Switch {fill, zap}_ro_mpt() to using mfn_t Andrew Cooper
                   ` (4 more replies)
  0 siblings, 5 replies; 21+ messages in thread
From: Andrew Cooper @ 2017-08-29 11:19 UTC (permalink / raw)
  To: Xen-devel; +Cc: Andrew Cooper

Andrew Cooper (5):
  x86/pv: Switch {fill,zap}_ro_mpt() to using mfn_t
  x86/pv: map_ldt_shadow_page() cleanup
  x86/pv: Consistently use typesafe helpers in all files
  x86/pv: Simplify access to the LDT/GDT ptes
  x86/percpu: Misc cleanup

 xen/arch/x86/domain.c          |  8 +++---
 xen/arch/x86/mm.c              | 62 ++++++++++++++++++++++--------------------
 xen/arch/x86/mm/shadow/multi.c |  4 +--
 xen/arch/x86/percpu.c          | 19 ++++++++++++-
 xen/arch/x86/pv/callback.c     |  6 ++++
 xen/arch/x86/pv/domain.c       |  6 ++++
 xen/arch/x86/pv/emul-gate-op.c |  6 ++++
 xen/arch/x86/pv/emul-priv-op.c | 20 +++++++++-----
 xen/arch/x86/pv/iret.c         |  6 ++++
 xen/arch/x86/pv/traps.c        |  6 ++++
 xen/arch/x86/traps.c           |  2 +-
 xen/include/asm-x86/domain.h   |  5 ++--
 xen/include/asm-x86/mm.h       |  6 ++--
 xen/include/asm-x86/page.h     |  4 +--
 14 files changed, 109 insertions(+), 51 deletions(-)

-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [PATCH 1/5] x86/pv: Switch {fill, zap}_ro_mpt() to using mfn_t
  2017-08-29 11:19 [PATCH 0/5] x86: Misc cleanup and improvements Andrew Cooper
@ 2017-08-29 11:19 ` Andrew Cooper
  2017-08-29 11:52   ` Tim Deegan
                     ` (3 more replies)
  2017-08-29 11:19 ` [PATCH 2/5] x86/pv: map_ldt_shadow_page() cleanup Andrew Cooper
                   ` (3 subsequent siblings)
  4 siblings, 4 replies; 21+ messages in thread
From: Andrew Cooper @ 2017-08-29 11:19 UTC (permalink / raw)
  To: Xen-devel; +Cc: George Dunlap, Andrew Cooper, Wei Liu, Jan Beulich, Tim Deegan

And update all affected callers.  Fix the fill_ro_mpt() prototype to be bool
like its implementation.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wei.liu2@citrix.com>
CC: Tim Deegan <tim@xen.org>
CC: George Dunlap <george.dunlap@eu.citrix.com>
---
 xen/arch/x86/domain.c          |  6 +++---
 xen/arch/x86/mm.c              | 12 ++++++------
 xen/arch/x86/mm/shadow/multi.c |  4 ++--
 xen/include/asm-x86/mm.h       |  4 ++--
 4 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 9b4b959..57c44b1 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -967,7 +967,7 @@ int arch_set_info_guest(
                 {
                     if ( (page->u.inuse.type_info & PGT_type_mask) ==
                          PGT_l4_page_table )
-                        done = !fill_ro_mpt(page_to_mfn(page));
+                        done = !fill_ro_mpt(_mfn(page_to_mfn(page)));
 
                     page_unlock(page);
                 }
@@ -1041,7 +1041,7 @@ int arch_set_info_guest(
         case 0:
             if ( !compat && !VM_ASSIST(d, m2p_strict) &&
                  !paging_mode_refcounts(d) )
-                fill_ro_mpt(cr3_gfn);
+                fill_ro_mpt(_mfn(cr3_gfn));
             break;
         default:
             if ( cr3_page == current->arch.old_guest_table )
@@ -1080,7 +1080,7 @@ int arch_set_info_guest(
                     break;
                 case 0:
                     if ( VM_ASSIST(d, m2p_strict) )
-                        zap_ro_mpt(cr3_gfn);
+                        zap_ro_mpt(_mfn(cr3_gfn));
                     break;
                 }
             }
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index b6d6ae3..79780da 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1616,9 +1616,9 @@ void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
         l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
 }
 
-bool fill_ro_mpt(unsigned long mfn)
+bool fill_ro_mpt(mfn_t mfn)
 {
-    l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
+    l4_pgentry_t *l4tab = map_domain_page(mfn);
     bool ret = false;
 
     if ( !l4e_get_intpte(l4tab[l4_table_offset(RO_MPT_VIRT_START)]) )
@@ -1632,9 +1632,9 @@ bool fill_ro_mpt(unsigned long mfn)
     return ret;
 }
 
-void zap_ro_mpt(unsigned long mfn)
+void zap_ro_mpt(mfn_t mfn)
 {
-    l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
+    l4_pgentry_t *l4tab = map_domain_page(mfn);
 
     l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
     unmap_domain_page(l4tab);
@@ -2820,7 +2820,7 @@ int new_guest_cr3(unsigned long mfn)
     invalidate_shadow_ldt(curr, 0);
 
     if ( !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
-        fill_ro_mpt(mfn);
+        fill_ro_mpt(_mfn(mfn));
     curr->arch.guest_table = pagetable_from_pfn(mfn);
     update_cr3(curr);
 
@@ -3194,7 +3194,7 @@ long do_mmuext_op(
                 }
 
                 if ( VM_ASSIST(currd, m2p_strict) )
-                    zap_ro_mpt(op.arg1.mfn);
+                    zap_ro_mpt(_mfn(op.arg1.mfn));
             }
 
             curr->arch.guest_table_user = pagetable_from_pfn(op.arg1.mfn);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index f0eabb6..c5c0af8 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -4232,10 +4232,10 @@ sh_update_cr3(struct vcpu *v, int do_locking)
         mfn_t smfn = pagetable_get_mfn(v->arch.shadow_table[0]);
 
         if ( !(v->arch.flags & TF_kernel_mode) && VM_ASSIST(d, m2p_strict) )
-            zap_ro_mpt(mfn_x(smfn));
+            zap_ro_mpt(smfn);
         else if ( (v->arch.flags & TF_kernel_mode) &&
                   !VM_ASSIST(d, m2p_strict) )
-            fill_ro_mpt(mfn_x(smfn));
+            fill_ro_mpt(smfn);
     }
 #else
 #error This should never happen
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index b738c89..5760e05 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -331,8 +331,8 @@ int free_page_type(struct page_info *page, unsigned long type,
 
 void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
                          bool_t zap_ro_mpt);
-bool_t fill_ro_mpt(unsigned long mfn);
-void zap_ro_mpt(unsigned long mfn);
+bool fill_ro_mpt(mfn_t mfn);
+void zap_ro_mpt(mfn_t mfn);
 
 bool is_iomem_page(mfn_t mfn);
 
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 2/5] x86/pv: map_ldt_shadow_page() cleanup
  2017-08-29 11:19 [PATCH 0/5] x86: Misc cleanup and improvements Andrew Cooper
  2017-08-29 11:19 ` [PATCH 1/5] x86/pv: Switch {fill, zap}_ro_mpt() to using mfn_t Andrew Cooper
@ 2017-08-29 11:19 ` Andrew Cooper
  2017-08-29 12:35   ` Wei Liu
                     ` (2 more replies)
  2017-08-29 11:19 ` [PATCH 3/5] x86/pv: Consistently use typesafe helpers in all files Andrew Cooper
                   ` (2 subsequent siblings)
  4 siblings, 3 replies; 21+ messages in thread
From: Andrew Cooper @ 2017-08-29 11:19 UTC (permalink / raw)
  To: Xen-devel; +Cc: Andrew Cooper, Wei Liu, Jan Beulich

Switch the return value from int to bool, to match its semantics.  Switch its
parameter from a frame offset to a byte offset (simplifying the sole caller)
and allowing for an extra sanity check that the fault is within the LDT limit.

Drop the unnecessary gmfn and okay local variables, and correct the gva
parameter to be named linear.  Rename l1e to gl1e, and simplify the
construction of the new pte by simply taking (the now validated) gl1e and
ensuring that _PAGE_RW is set.

Calculate the pte to updated outside of the spinlock, which halves the size of
the critical region.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wei.liu2@citrix.com>
---
 xen/arch/x86/mm.c        | 42 +++++++++++++++++++++++-------------------
 xen/arch/x86/traps.c     |  2 +-
 xen/include/asm-x86/mm.h |  2 +-
 3 files changed, 25 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 79780da..c41ed1b 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -667,45 +667,49 @@ static int alloc_segdesc_page(struct page_info *page)
 }
 
 
-/* Map shadow page at offset @off. */
-int map_ldt_shadow_page(unsigned int off)
+/*
+ * Map a guests LDT page (at @offset bytes from the start of the LDT) into
+ * Xen's virtual range.  Returns true if the mapping changed, false otherwise.
+ */
+bool map_ldt_shadow_page(unsigned int offset)
 {
     struct vcpu *v = current;
     struct domain *d = v->domain;
-    unsigned long gmfn;
     struct page_info *page;
-    l1_pgentry_t l1e, nl1e;
-    unsigned long gva = v->arch.pv_vcpu.ldt_base + (off << PAGE_SHIFT);
-    int okay;
+    l1_pgentry_t gl1e, *pl1e;
+    unsigned long linear = v->arch.pv_vcpu.ldt_base + offset;
 
     BUG_ON(unlikely(in_irq()));
 
+    /* Hardware limit checking should guarantee this property. */
+    ASSERT((offset >> 3) <= v->arch.pv_vcpu.ldt_ents);
+
     if ( is_pv_32bit_domain(d) )
-        gva = (u32)gva;
-    guest_get_eff_kern_l1e(gva, &l1e);
-    if ( unlikely(!(l1e_get_flags(l1e) & _PAGE_PRESENT)) )
-        return 0;
+        linear = (uint32_t)linear;
 
-    gmfn = l1e_get_pfn(l1e);
-    page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+    guest_get_eff_kern_l1e(linear, &gl1e);
+    if ( unlikely(!(l1e_get_flags(gl1e) & _PAGE_PRESENT)) )
+        return false;
+
+    page = get_page_from_gfn(d, l1e_get_pfn(gl1e), NULL, P2M_ALLOC);
     if ( unlikely(!page) )
-        return 0;
+        return false;
 
-    okay = get_page_type(page, PGT_seg_desc_page);
-    if ( unlikely(!okay) )
+    if ( unlikely(!get_page_type(page, PGT_seg_desc_page)) )
     {
         put_page(page);
-        return 0;
+        return false;
     }
 
-    nl1e = l1e_from_page(page, l1e_get_flags(l1e) | _PAGE_RW);
+    pl1e = &gdt_ldt_ptes(d, v)[(offset >> PAGE_SHIFT) + 16];
+    l1e_add_flags(gl1e, _PAGE_RW);
 
     spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
-    l1e_write(&gdt_ldt_ptes(d, v)[off + 16], nl1e);
+    l1e_write(pl1e, gl1e);
     v->arch.pv_vcpu.shadow_ldt_mapcnt++;
     spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
 
-    return 1;
+    return true;
 }
 
 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index b93b3d1..f525fa2 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1112,7 +1112,7 @@ static int handle_gdt_ldt_mapping_fault(unsigned long offset,
     if ( likely(is_ldt_area) )
     {
         /* LDT fault: Copy a mapping from the guest's LDT, if it is valid. */
-        if ( likely(map_ldt_shadow_page(offset >> PAGE_SHIFT)) )
+        if ( likely(map_ldt_shadow_page(offset)) )
         {
             if ( guest_mode(regs) )
                 trace_trap_two_addr(TRC_PV_GDT_LDT_MAPPING_FAULT,
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 5760e05..ec7ce3c 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -553,7 +553,7 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
 int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void));
 int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
 
-int map_ldt_shadow_page(unsigned int);
+bool map_ldt_shadow_page(unsigned int);
 
 #define NIL(type) ((type *)-sizeof(type))
 #define IS_NIL(ptr) (!((uintptr_t)(ptr) + sizeof(*(ptr))))
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 3/5] x86/pv: Consistently use typesafe helpers in all files
  2017-08-29 11:19 [PATCH 0/5] x86: Misc cleanup and improvements Andrew Cooper
  2017-08-29 11:19 ` [PATCH 1/5] x86/pv: Switch {fill, zap}_ro_mpt() to using mfn_t Andrew Cooper
  2017-08-29 11:19 ` [PATCH 2/5] x86/pv: map_ldt_shadow_page() cleanup Andrew Cooper
@ 2017-08-29 11:19 ` Andrew Cooper
  2017-08-29 13:39   ` Wei Liu
  2017-08-29 11:19 ` [PATCH 4/5] x86/pv: Simplify access to the LDT/GDT ptes Andrew Cooper
  2017-08-29 11:19 ` [PATCH 5/5] x86/percpu: Misc cleanup Andrew Cooper
  4 siblings, 1 reply; 21+ messages in thread
From: Andrew Cooper @ 2017-08-29 11:19 UTC (permalink / raw)
  To: Xen-devel; +Cc: Andrew Cooper, Wei Liu, Jan Beulich

Rather than having a mix of code behaviour.  This requires updating
pagetable_{get,from}_page() to use the non-overridden helpers.

This requires some adjustments in priv_op_{read,write}_cr(), which is most
easily done by switching CR3 handling to using mfn_t.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wei.liu2@citrix.com>
---
 xen/arch/x86/pv/callback.c     |  6 ++++++
 xen/arch/x86/pv/domain.c       |  6 ++++++
 xen/arch/x86/pv/emul-gate-op.c |  6 ++++++
 xen/arch/x86/pv/emul-priv-op.c | 20 +++++++++++++-------
 xen/arch/x86/pv/iret.c         |  6 ++++++
 xen/arch/x86/pv/traps.c        |  6 ++++++
 xen/include/asm-x86/page.h     |  4 ++--
 7 files changed, 45 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/pv/callback.c b/xen/arch/x86/pv/callback.c
index 5957cb5..97d8438 100644
--- a/xen/arch/x86/pv/callback.c
+++ b/xen/arch/x86/pv/callback.c
@@ -31,6 +31,12 @@
 
 #include <public/callback.h>
 
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
+#undef page_to_mfn
+#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
+
 static int register_guest_nmi_callback(unsigned long address)
 {
     struct vcpu *curr = current;
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 6cb61f2..c8b9cb6 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -11,6 +11,12 @@
 
 #include <asm/pv/domain.h>
 
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
+#undef page_to_mfn
+#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
+
 static void noreturn continue_nonidle_domain(struct vcpu *v)
 {
     check_wakeup_from_wait();
diff --git a/xen/arch/x86/pv/emul-gate-op.c b/xen/arch/x86/pv/emul-gate-op.c
index cdf3b30..0a7381a 100644
--- a/xen/arch/x86/pv/emul-gate-op.c
+++ b/xen/arch/x86/pv/emul-gate-op.c
@@ -41,6 +41,12 @@
 
 #include "emulate.h"
 
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
+#undef page_to_mfn
+#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
+
 static int read_gate_descriptor(unsigned int gate_sel,
                                 const struct vcpu *v,
                                 unsigned int *sel,
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index d50f519..af1624a 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -42,6 +42,12 @@
 #include "../x86_64/mmconfig.h"
 #include "emulate.h"
 
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
+#undef page_to_mfn
+#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
+
 /***********************
  * I/O emulation support
  */
@@ -709,21 +715,21 @@ static int priv_op_read_cr(unsigned int reg, unsigned long *val,
     case 3: /* Read CR3 */
     {
         const struct domain *currd = curr->domain;
-        unsigned long mfn;
+        mfn_t mfn;
 
         if ( !is_pv_32bit_domain(currd) )
         {
-            mfn = pagetable_get_pfn(curr->arch.guest_table);
-            *val = xen_pfn_to_cr3(mfn_to_gmfn(currd, mfn));
+            mfn = pagetable_get_mfn(curr->arch.guest_table);
+            *val = xen_pfn_to_cr3(mfn_to_gmfn(currd, mfn_x(mfn)));
         }
         else
         {
             l4_pgentry_t *pl4e =
-                map_domain_page(_mfn(pagetable_get_pfn(curr->arch.guest_table)));
+                map_domain_page(pagetable_get_mfn(curr->arch.guest_table));
 
-            mfn = l4e_get_pfn(*pl4e);
+            mfn = l4e_get_mfn(*pl4e);
             unmap_domain_page(pl4e);
-            *val = compat_pfn_to_cr3(mfn_to_gmfn(currd, mfn));
+            *val = compat_pfn_to_cr3(mfn_to_gmfn(currd, mfn_x(mfn)));
         }
         /* PTs should not be shared */
         BUG_ON(page_get_owner(mfn_to_page(mfn)) == dom_cow);
@@ -768,7 +774,7 @@ static int priv_op_write_cr(unsigned int reg, unsigned long val,
         page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC);
         if ( !page )
             break;
-        rc = new_guest_cr3(page_to_mfn(page));
+        rc = new_guest_cr3(mfn_x(page_to_mfn(page)));
         put_page(page);
 
         switch ( rc )
diff --git a/xen/arch/x86/pv/iret.c b/xen/arch/x86/pv/iret.c
index ca433a6..56aeac3 100644
--- a/xen/arch/x86/pv/iret.c
+++ b/xen/arch/x86/pv/iret.c
@@ -24,6 +24,12 @@
 #include <asm/current.h>
 #include <asm/traps.h>
 
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
+#undef page_to_mfn
+#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
+
 unsigned long do_iret(void)
 {
     struct cpu_user_regs *regs = guest_cpu_user_regs();
diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
index 2a605ed..d122881 100644
--- a/xen/arch/x86/pv/traps.c
+++ b/xen/arch/x86/pv/traps.c
@@ -29,6 +29,12 @@
 #include <asm/shared.h>
 #include <asm/traps.h>
 
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
+#undef page_to_mfn
+#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
+
 void do_entry_int82(struct cpu_user_regs *regs)
 {
     if ( unlikely(untrusted_msi) )
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index b4ce5ae..cde5c6b 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -215,13 +215,13 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
 /* Page-table type. */
 typedef struct { u64 pfn; } pagetable_t;
 #define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
-#define pagetable_get_page(x)   mfn_to_page((x).pfn)
+#define pagetable_get_page(x)   __mfn_to_page((x).pfn)
 #define pagetable_get_pfn(x)    ((x).pfn)
 #define pagetable_get_mfn(x)    _mfn(((x).pfn))
 #define pagetable_is_null(x)    ((x).pfn == 0)
 #define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
 #define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) })
-#define pagetable_from_page(pg) pagetable_from_pfn(page_to_mfn(pg))
+#define pagetable_from_page(pg) pagetable_from_pfn(__page_to_mfn(pg))
 #define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
 #define pagetable_null()        pagetable_from_pfn(0)
 
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 4/5] x86/pv: Simplify access to the LDT/GDT ptes
  2017-08-29 11:19 [PATCH 0/5] x86: Misc cleanup and improvements Andrew Cooper
                   ` (2 preceding siblings ...)
  2017-08-29 11:19 ` [PATCH 3/5] x86/pv: Consistently use typesafe helpers in all files Andrew Cooper
@ 2017-08-29 11:19 ` Andrew Cooper
  2017-08-29 13:43   ` Wei Liu
  2017-08-29 14:23   ` Jan Beulich
  2017-08-29 11:19 ` [PATCH 5/5] x86/percpu: Misc cleanup Andrew Cooper
  4 siblings, 2 replies; 21+ messages in thread
From: Andrew Cooper @ 2017-08-29 11:19 UTC (permalink / raw)
  To: Xen-devel; +Cc: Andrew Cooper, Wei Liu, Jan Beulich

Rename gdt_ldt_ptes() to pv_gdt_ptes() and drop the domain parameter, as it is
incorrect to use the helper with d != v->domain.

Introduce pv_ldt_ptes() to abstract away the fact that the LDT mapping is 16
slots after the GDT, and adjust the callers accordingly.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wei.liu2@citrix.com>
---
 xen/arch/x86/domain.c        |  2 +-
 xen/arch/x86/mm.c            | 10 +++++-----
 xen/include/asm-x86/domain.h |  5 +++--
 3 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 57c44b1..dbddc53 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1591,7 +1591,7 @@ static void __context_switch(void)
     if ( need_full_gdt(nd) )
     {
         unsigned long mfn = virt_to_mfn(gdt);
-        l1_pgentry_t *pl1e = gdt_ldt_ptes(nd, n);
+        l1_pgentry_t *pl1e = pv_gdt_ptes(n);
         unsigned int i;
 
         for ( i = 0; i < NR_RESERVED_GDT_PAGES; i++ )
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c41ed1b..efcddd3 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -629,9 +629,9 @@ static void invalidate_shadow_ldt(struct vcpu *v, int flush)
         goto out;
 
     v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;
-    pl1e = gdt_ldt_ptes(v->domain, v);
+    pl1e = pv_ldt_ptes(v);
 
-    for ( i = 16; i < 32; i++ )
+    for ( i = 0; i < 16; i++ )
     {
         if ( !(l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) )
             continue;
@@ -701,7 +701,7 @@ bool map_ldt_shadow_page(unsigned int offset)
         return false;
     }
 
-    pl1e = &gdt_ldt_ptes(d, v)[(offset >> PAGE_SHIFT) + 16];
+    pl1e = &pv_ldt_ptes(v)[offset >> PAGE_SHIFT];
     l1e_add_flags(gl1e, _PAGE_RW);
 
     spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
@@ -4379,7 +4379,7 @@ void destroy_gdt(struct vcpu *v)
     unsigned long pfn, zero_pfn = PFN_DOWN(__pa(zero_page));
 
     v->arch.pv_vcpu.gdt_ents = 0;
-    pl1e = gdt_ldt_ptes(v->domain, v);
+    pl1e = pv_gdt_ptes(v);
     for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
     {
         pfn = l1e_get_pfn(pl1e[i]);
@@ -4424,7 +4424,7 @@ long set_gdt(struct vcpu *v,
 
     /* Install the new GDT. */
     v->arch.pv_vcpu.gdt_ents = entries;
-    pl1e = gdt_ldt_ptes(d, v);
+    pl1e = pv_gdt_ptes(v);
     for ( i = 0; i < nr_pages; i++ )
     {
         v->arch.pv_vcpu.gdt_frames[i] = frames[i];
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index c10522b..4999030 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -432,9 +432,10 @@ struct arch_domain
 
 #define gdt_ldt_pt_idx(v) \
       ((v)->vcpu_id >> (PAGETABLE_ORDER - GDT_LDT_VCPU_SHIFT))
-#define gdt_ldt_ptes(d, v) \
-    ((d)->arch.pv_domain.gdt_ldt_l1tab[gdt_ldt_pt_idx(v)] + \
+#define pv_gdt_ptes(v) \
+    ((v)->domain->arch.pv_domain.gdt_ldt_l1tab[gdt_ldt_pt_idx(v)] + \
      (((v)->vcpu_id << GDT_LDT_VCPU_SHIFT) & (L1_PAGETABLE_ENTRIES - 1)))
+#define pv_ldt_ptes(v) (pv_gdt_ptes(v) + 16)
 
 struct pv_vcpu
 {
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 5/5] x86/percpu: Misc cleanup
  2017-08-29 11:19 [PATCH 0/5] x86: Misc cleanup and improvements Andrew Cooper
                   ` (3 preceding siblings ...)
  2017-08-29 11:19 ` [PATCH 4/5] x86/pv: Simplify access to the LDT/GDT ptes Andrew Cooper
@ 2017-08-29 11:19 ` Andrew Cooper
  2017-08-29 13:44   ` Wei Liu
  4 siblings, 1 reply; 21+ messages in thread
From: Andrew Cooper @ 2017-08-29 11:19 UTC (permalink / raw)
  To: Xen-devel; +Cc: Andrew Cooper, Jan Beulich

 * Drop unnecessary brackets.
 * Add spaces around binary operators.
 * Insert appropriate blank lines.
 * Insert a local variable block at the end.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
---
 xen/arch/x86/percpu.c | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/percpu.c b/xen/arch/x86/percpu.c
index 1d3bc2e..c9997b7 100644
--- a/xen/arch/x86/percpu.c
+++ b/xen/arch/x86/percpu.c
@@ -13,11 +13,12 @@ unsigned long __per_cpu_offset[NR_CPUS];
  * context of PV guests.
  */
 #define INVALID_PERCPU_AREA (0x8000000000000000L - (long)__per_cpu_start)
-#define PERCPU_ORDER (get_order_from_bytes(__per_cpu_data_end-__per_cpu_start))
+#define PERCPU_ORDER get_order_from_bytes(__per_cpu_data_end - __per_cpu_start)
 
 void __init percpu_init_areas(void)
 {
     unsigned int cpu;
+
     for ( cpu = 1; cpu < NR_CPUS; cpu++ )
         __per_cpu_offset[cpu] = INVALID_PERCPU_AREA;
 }
@@ -25,12 +26,16 @@ void __init percpu_init_areas(void)
 static int init_percpu_area(unsigned int cpu)
 {
     char *p;
+
     if ( __per_cpu_offset[cpu] != INVALID_PERCPU_AREA )
         return -EBUSY;
+
     if ( (p = alloc_xenheap_pages(PERCPU_ORDER, 0)) == NULL )
         return -ENOMEM;
+
     memset(p, 0, __per_cpu_data_end - __per_cpu_start);
     __per_cpu_offset[cpu] = p - __per_cpu_start;
+
     return 0;
 }
 
@@ -45,6 +50,7 @@ static void _free_percpu_area(struct rcu_head *head)
     struct free_info *info = container_of(head, struct free_info, rcu);
     unsigned int cpu = info->cpu;
     char *p = __per_cpu_start + __per_cpu_offset[cpu];
+
     free_xenheap_pages(p, PERCPU_ORDER);
     __per_cpu_offset[cpu] = INVALID_PERCPU_AREA;
 }
@@ -52,6 +58,7 @@ static void _free_percpu_area(struct rcu_head *head)
 static void free_percpu_area(unsigned int cpu)
 {
     struct free_info *info = &per_cpu(free_info, cpu);
+
     info->cpu = cpu;
     call_rcu(&info->rcu, _free_percpu_area);
 }
@@ -86,6 +93,16 @@ static struct notifier_block cpu_percpu_nfb = {
 static int __init percpu_presmp_init(void)
 {
     register_cpu_notifier(&cpu_percpu_nfb);
+
     return 0;
 }
 presmp_initcall(percpu_presmp_init);
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* Re: [PATCH 1/5] x86/pv: Switch {fill, zap}_ro_mpt() to using mfn_t
  2017-08-29 11:19 ` [PATCH 1/5] x86/pv: Switch {fill, zap}_ro_mpt() to using mfn_t Andrew Cooper
@ 2017-08-29 11:52   ` Tim Deegan
  2017-08-29 12:01   ` George Dunlap
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 21+ messages in thread
From: Tim Deegan @ 2017-08-29 11:52 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: George Dunlap, Wei Liu, Jan Beulich, Xen-devel

At 12:19 +0100 on 29 Aug (1504009152), Andrew Cooper wrote:
> And update all affected callers.  Fix the fill_ro_mpt() prototype to be bool
> like its implementation.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 1/5] x86/pv: Switch {fill, zap}_ro_mpt() to using mfn_t
  2017-08-29 11:19 ` [PATCH 1/5] x86/pv: Switch {fill, zap}_ro_mpt() to using mfn_t Andrew Cooper
  2017-08-29 11:52   ` Tim Deegan
@ 2017-08-29 12:01   ` George Dunlap
  2017-08-29 12:17   ` Wei Liu
  2017-08-29 14:00   ` Jan Beulich
  3 siblings, 0 replies; 21+ messages in thread
From: George Dunlap @ 2017-08-29 12:01 UTC (permalink / raw)
  To: Andrew Cooper, Xen-devel; +Cc: George Dunlap, Tim Deegan, Wei Liu, Jan Beulich

On 08/29/2017 12:19 PM, Andrew Cooper wrote:
> And update all affected callers.  Fix the fill_ro_mpt() prototype to be bool
> like its implementation.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Acked-by: George Dunlap <george.dunlap@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 1/5] x86/pv: Switch {fill, zap}_ro_mpt() to using mfn_t
  2017-08-29 11:19 ` [PATCH 1/5] x86/pv: Switch {fill, zap}_ro_mpt() to using mfn_t Andrew Cooper
  2017-08-29 11:52   ` Tim Deegan
  2017-08-29 12:01   ` George Dunlap
@ 2017-08-29 12:17   ` Wei Liu
  2017-08-29 14:00   ` Jan Beulich
  3 siblings, 0 replies; 21+ messages in thread
From: Wei Liu @ 2017-08-29 12:17 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: George Dunlap, Tim Deegan, Wei Liu, Jan Beulich, Xen-devel

On Tue, Aug 29, 2017 at 12:19:12PM +0100, Andrew Cooper wrote:
> And update all affected callers.  Fix the fill_ro_mpt() prototype to be bool
> like its implementation.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 2/5] x86/pv: map_ldt_shadow_page() cleanup
  2017-08-29 11:19 ` [PATCH 2/5] x86/pv: map_ldt_shadow_page() cleanup Andrew Cooper
@ 2017-08-29 12:35   ` Wei Liu
  2017-08-29 14:14   ` Jan Beulich
  2017-08-29 15:57   ` [PATCH v2 " Andrew Cooper
  2 siblings, 0 replies; 21+ messages in thread
From: Wei Liu @ 2017-08-29 12:35 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Wei Liu, Jan Beulich, Xen-devel

On Tue, Aug 29, 2017 at 12:19:13PM +0100, Andrew Cooper wrote:
> Switch the return value from int to bool, to match its semantics.  Switch its
> parameter from a frame offset to a byte offset (simplifying the sole caller)
> and allowing for an extra sanity check that the fault is within the LDT limit.
> 
> Drop the unnecessary gmfn and okay local variables, and correct the gva
> parameter to be named linear.  Rename l1e to gl1e, and simplify the
> construction of the new pte by simply taking (the now validated) gl1e and
> ensuring that _PAGE_RW is set.
> 
> Calculate the pte to updated outside of the spinlock, which halves the size of

be updated?

> the critical region.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

with some nitpicking.

> ---
> CC: Jan Beulich <JBeulich@suse.com>
> CC: Wei Liu <wei.liu2@citrix.com>
> ---
>  xen/arch/x86/mm.c        | 42 +++++++++++++++++++++++-------------------
>  xen/arch/x86/traps.c     |  2 +-
>  xen/include/asm-x86/mm.h |  2 +-
>  3 files changed, 25 insertions(+), 21 deletions(-)
> 
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 79780da..c41ed1b 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -667,45 +667,49 @@ static int alloc_segdesc_page(struct page_info *page)
>  }
>  
>  
> -/* Map shadow page at offset @off. */
> -int map_ldt_shadow_page(unsigned int off)
> +/*
> + * Map a guests LDT page (at @offset bytes from the start of the LDT) into

guest's

> + * Xen's virtual range.  Returns true if the mapping changed, false otherwise.

if the mapping is established / successful?

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 3/5] x86/pv: Consistently use typesafe helpers in all files
  2017-08-29 11:19 ` [PATCH 3/5] x86/pv: Consistently use typesafe helpers in all files Andrew Cooper
@ 2017-08-29 13:39   ` Wei Liu
  2017-08-29 14:19     ` Jan Beulich
  0 siblings, 1 reply; 21+ messages in thread
From: Wei Liu @ 2017-08-29 13:39 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Wei Liu, Jan Beulich, Xen-devel

On Tue, Aug 29, 2017 at 12:19:14PM +0100, Andrew Cooper wrote:
> Rather than having a mix of code behaviour.  This requires updating
> pagetable_{get,from}_page() to use the non-overridden helpers.
> 
> This requires some adjustments in priv_op_{read,write}_cr(), which is most
> easily done by switching CR3 handling to using mfn_t.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 4/5] x86/pv: Simplify access to the LDT/GDT ptes
  2017-08-29 11:19 ` [PATCH 4/5] x86/pv: Simplify access to the LDT/GDT ptes Andrew Cooper
@ 2017-08-29 13:43   ` Wei Liu
  2017-08-29 14:23   ` Jan Beulich
  1 sibling, 0 replies; 21+ messages in thread
From: Wei Liu @ 2017-08-29 13:43 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Wei Liu, Jan Beulich, Xen-devel

On Tue, Aug 29, 2017 at 12:19:15PM +0100, Andrew Cooper wrote:
> Rename gdt_ldt_ptes() to pv_gdt_ptes() and drop the domain parameter, as it is
> incorrect to use the helper with d != v->domain.
> 
> Introduce pv_ldt_ptes() to abstract away the fact that the LDT mapping is 16
> slots after the GDT, and adjust the callers accordingly.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 5/5] x86/percpu: Misc cleanup
  2017-08-29 11:19 ` [PATCH 5/5] x86/percpu: Misc cleanup Andrew Cooper
@ 2017-08-29 13:44   ` Wei Liu
  2017-08-29 14:24     ` Jan Beulich
  0 siblings, 1 reply; 21+ messages in thread
From: Wei Liu @ 2017-08-29 13:44 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Wei Liu, Jan Beulich, Xen-devel

On Tue, Aug 29, 2017 at 12:19:16PM +0100, Andrew Cooper wrote:
>  * Drop unnecessary brackets.
>  * Add spaces around binary operators.
>  * Insert appropriate blank lines.
>  * Insert a local variable block at the end.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 1/5] x86/pv: Switch {fill, zap}_ro_mpt() to using mfn_t
  2017-08-29 11:19 ` [PATCH 1/5] x86/pv: Switch {fill, zap}_ro_mpt() to using mfn_t Andrew Cooper
                     ` (2 preceding siblings ...)
  2017-08-29 12:17   ` Wei Liu
@ 2017-08-29 14:00   ` Jan Beulich
  3 siblings, 0 replies; 21+ messages in thread
From: Jan Beulich @ 2017-08-29 14:00 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: George Dunlap, Tim Deegan, Wei Liu, Xen-devel

>>> On 29.08.17 at 13:19, <andrew.cooper3@citrix.com> wrote:
> And update all affected callers.  Fix the fill_ro_mpt() prototype to be bool
> like its implementation.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Acked-by: Jan Beulich <jbeulich@suse.com>



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 2/5] x86/pv: map_ldt_shadow_page() cleanup
  2017-08-29 11:19 ` [PATCH 2/5] x86/pv: map_ldt_shadow_page() cleanup Andrew Cooper
  2017-08-29 12:35   ` Wei Liu
@ 2017-08-29 14:14   ` Jan Beulich
  2017-08-29 14:54     ` Andrew Cooper
  2017-08-29 15:57   ` [PATCH v2 " Andrew Cooper
  2 siblings, 1 reply; 21+ messages in thread
From: Jan Beulich @ 2017-08-29 14:14 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Wei Liu, Xen-devel

>>> On 29.08.17 at 13:19, <andrew.cooper3@citrix.com> wrote:
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -667,45 +667,49 @@ static int alloc_segdesc_page(struct page_info *page)
>  }
>  
>  
> -/* Map shadow page at offset @off. */
> -int map_ldt_shadow_page(unsigned int off)
> +/*
> + * Map a guests LDT page (at @offset bytes from the start of the LDT) into

The comment is not really correct: The low 12 bits of offset don't
matter for where the page gets mapped. "(covering the byte at
@offset ..." perhaps?

> + * Xen's virtual range.  Returns true if the mapping changed, false otherwise.
> + */
> +bool map_ldt_shadow_page(unsigned int offset)
>  {
>      struct vcpu *v = current;
>      struct domain *d = v->domain;
> -    unsigned long gmfn;
>      struct page_info *page;
> -    l1_pgentry_t l1e, nl1e;
> -    unsigned long gva = v->arch.pv_vcpu.ldt_base + (off << PAGE_SHIFT);
> -    int okay;
> +    l1_pgentry_t gl1e, *pl1e;
> +    unsigned long linear = v->arch.pv_vcpu.ldt_base + offset;
>  
>      BUG_ON(unlikely(in_irq()));
>  
> +    /* Hardware limit checking should guarentee this property. */

guarantee?

> +    ASSERT((offset >> 3) <= v->arch.pv_vcpu.ldt_ents);

Can this validly be an ASSERT()? I.e. is there really no way for
ldt_ents for a vCPU to change between the hardware limit check
and execution making it here? MMUEXT_SET_LDT acts on current,
but vcpu_reset() clearing v->is_initialised and then
arch_set_info_guest() becoming usable on this vCPU is not that
trivial to exclude (i.e. at least the comment would probably want
extending).

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 3/5] x86/pv: Consistently use typesafe helpers in all files
  2017-08-29 13:39   ` Wei Liu
@ 2017-08-29 14:19     ` Jan Beulich
  0 siblings, 0 replies; 21+ messages in thread
From: Jan Beulich @ 2017-08-29 14:19 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Wei Liu, Xen-devel

>>> On 29.08.17 at 15:39, <wei.liu2@citrix.com> wrote:
> On Tue, Aug 29, 2017 at 12:19:14PM +0100, Andrew Cooper wrote:
>> Rather than having a mix of code behaviour.  This requires updating
>> pagetable_{get,from}_page() to use the non-overridden helpers.
>> 
>> This requires some adjustments in priv_op_{read,write}_cr(), which is most
>> easily done by switching CR3 handling to using mfn_t.
>> 
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> 
> Reviewed-by: Wei Liu <wei.liu2@citrix.com>

Acked-by: Jan Beulich <jbeulich@suse.com>



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 4/5] x86/pv: Simplify access to the LDT/GDT ptes
  2017-08-29 11:19 ` [PATCH 4/5] x86/pv: Simplify access to the LDT/GDT ptes Andrew Cooper
  2017-08-29 13:43   ` Wei Liu
@ 2017-08-29 14:23   ` Jan Beulich
  1 sibling, 0 replies; 21+ messages in thread
From: Jan Beulich @ 2017-08-29 14:23 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Wei Liu, Xen-devel

>>> On 29.08.17 at 13:19, <andrew.cooper3@citrix.com> wrote:
> Rename gdt_ldt_ptes() to pv_gdt_ptes() and drop the domain parameter, as it is
> incorrect to use the helper with d != v->domain.

Indeed, but it was still done this way for a reason:

> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -432,9 +432,10 @@ struct arch_domain
>  
>  #define gdt_ldt_pt_idx(v) \
>        ((v)->vcpu_id >> (PAGETABLE_ORDER - GDT_LDT_VCPU_SHIFT))
> -#define gdt_ldt_ptes(d, v) \
> -    ((d)->arch.pv_domain.gdt_ldt_l1tab[gdt_ldt_pt_idx(v)] + \
> +#define pv_gdt_ptes(v) \
> +    ((v)->domain->arch.pv_domain.gdt_ldt_l1tab[gdt_ldt_pt_idx(v)] + \

When the caller has latched v->domain into a local (register)
variable, the compiler normally can't know that v->domain
doesn't need to be re-read. But anyway, this isn't important
enough to block the change, so
Acked-by: Jan Beulich <jbeulich@suse.com>

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 5/5] x86/percpu: Misc cleanup
  2017-08-29 13:44   ` Wei Liu
@ 2017-08-29 14:24     ` Jan Beulich
  0 siblings, 0 replies; 21+ messages in thread
From: Jan Beulich @ 2017-08-29 14:24 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Wei Liu, Xen-devel

>>> On 29.08.17 at 15:44, <wei.liu2@citrix.com> wrote:
> On Tue, Aug 29, 2017 at 12:19:16PM +0100, Andrew Cooper wrote:
>>  * Drop unnecessary brackets.
>>  * Add spaces around binary operators.
>>  * Insert appropriate blank lines.
>>  * Insert a local variable block at the end.
>> 
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> 
> Reviewed-by: Wei Liu <wei.liu2@citrix.com>

Acked-by: Jan Beulich <jbeulich@suse.com>



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 2/5] x86/pv: map_ldt_shadow_page() cleanup
  2017-08-29 14:14   ` Jan Beulich
@ 2017-08-29 14:54     ` Andrew Cooper
  0 siblings, 0 replies; 21+ messages in thread
From: Andrew Cooper @ 2017-08-29 14:54 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Wei Liu, Xen-devel

On 29/08/17 15:14, Jan Beulich wrote:
>>>> On 29.08.17 at 13:19, <andrew.cooper3@citrix.com> wrote:
>> --- a/xen/arch/x86/mm.c
>> +++ b/xen/arch/x86/mm.c
>> @@ -667,45 +667,49 @@ static int alloc_segdesc_page(struct page_info *page)
>>  }
>>  
>>  
>> -/* Map shadow page at offset @off. */
>> -int map_ldt_shadow_page(unsigned int off)
>> +/*
>> + * Map a guests LDT page (at @offset bytes from the start of the LDT) into
> The comment is not really correct: The low 12 bits of offset don't
> matter for where the page gets mapped. "(covering the byte at
> @offset ..." perhaps?

Ok.

>
>> + * Xen's virtual range.  Returns true if the mapping changed, false otherwise.
>> + */
>> +bool map_ldt_shadow_page(unsigned int offset)
>>  {
>>      struct vcpu *v = current;
>>      struct domain *d = v->domain;
>> -    unsigned long gmfn;
>>      struct page_info *page;
>> -    l1_pgentry_t l1e, nl1e;
>> -    unsigned long gva = v->arch.pv_vcpu.ldt_base + (off << PAGE_SHIFT);
>> -    int okay;
>> +    l1_pgentry_t gl1e, *pl1e;
>> +    unsigned long linear = v->arch.pv_vcpu.ldt_base + offset;
>>  
>>      BUG_ON(unlikely(in_irq()));
>>  
>> +    /* Hardware limit checking should guarentee this property. */
> guarantee?

Yes.

>
>> +    ASSERT((offset >> 3) <= v->arch.pv_vcpu.ldt_ents);
> Can this validly be an ASSERT()? I.e. is there really no way for
> ldt_ents for a vCPU to change between the hardware limit check
> and execution making it here? MMUEXT_SET_LDT acts on current,
> but vcpu_reset() clearing v->is_initialised and then
> arch_set_info_guest() becoming usable on this vCPU is not that
> trivial to exclude (i.e. at least the comment would probably want
> extending).

vcpu_reset() calls vcpu_pause() first, which will block until the #PF
handler completes.

arch_set_info_guest() can't be called on already-initialised vcpus.

I will extend the comment to explain why the ASSERT() is safe.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [PATCH v2 2/5] x86/pv: map_ldt_shadow_page() cleanup
  2017-08-29 11:19 ` [PATCH 2/5] x86/pv: map_ldt_shadow_page() cleanup Andrew Cooper
  2017-08-29 12:35   ` Wei Liu
  2017-08-29 14:14   ` Jan Beulich
@ 2017-08-29 15:57   ` Andrew Cooper
  2017-08-30  8:21     ` Jan Beulich
  2 siblings, 1 reply; 21+ messages in thread
From: Andrew Cooper @ 2017-08-29 15:57 UTC (permalink / raw)
  To: Xen-devel; +Cc: Andrew Cooper, Wei Liu, Jan Beulich

Switch the return value from int to bool, to match its semantics.  Switch its
parameter from a frame offset to a byte offset (simplifying the sole caller)
and allowing for an extra sanity check that the fault is within the LDT limit.

Drop the unnecessary gmfn and okay local variables, and correct the gva
parameter to be named linear.  Rename l1e to gl1e, and simplify the
construction of the new pte by simply taking (the now validated) gl1e and
ensuring that _PAGE_RW is set.

Calculate the pte to be updated outside of the spinlock, which halves the size
of the critical region.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wei.liu2@citrix.com>

v2:
 * Adjustments to various comments.
---
 xen/arch/x86/mm.c        | 48 +++++++++++++++++++++++++++++-------------------
 xen/arch/x86/traps.c     |  2 +-
 xen/include/asm-x86/mm.h |  2 +-
 3 files changed, 31 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 79780da..f1983b5 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -667,45 +667,55 @@ static int alloc_segdesc_page(struct page_info *page)
 }
 
 
-/* Map shadow page at offset @off. */
-int map_ldt_shadow_page(unsigned int off)
+/*
+ * Map a guest's LDT page (covering the byte at @offset from start of the LDT)
+ * into Xen's virtual range.  Returns true if the mapping changed, false
+ * otherwise.
+ */
+bool map_ldt_shadow_page(unsigned int offset)
 {
     struct vcpu *v = current;
     struct domain *d = v->domain;
-    unsigned long gmfn;
     struct page_info *page;
-    l1_pgentry_t l1e, nl1e;
-    unsigned long gva = v->arch.pv_vcpu.ldt_base + (off << PAGE_SHIFT);
-    int okay;
+    l1_pgentry_t gl1e, *pl1e;
+    unsigned long linear = v->arch.pv_vcpu.ldt_base + offset;
 
     BUG_ON(unlikely(in_irq()));
 
+    /*
+     * Hardware limit checking should guarantee this property.  NB. This is
+     * safe as updates to the LDT can only be made by MMUEXT_SET_LDT to the
+     * current vcpu, and vcpu_reset() will block until this vcpu has been
+     * descheduled before continuing.
+     */
+    ASSERT((offset >> 3) <= v->arch.pv_vcpu.ldt_ents);
+
     if ( is_pv_32bit_domain(d) )
-        gva = (u32)gva;
-    guest_get_eff_kern_l1e(gva, &l1e);
-    if ( unlikely(!(l1e_get_flags(l1e) & _PAGE_PRESENT)) )
-        return 0;
+        linear = (uint32_t)linear;
 
-    gmfn = l1e_get_pfn(l1e);
-    page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+    guest_get_eff_kern_l1e(linear, &gl1e);
+    if ( unlikely(!(l1e_get_flags(gl1e) & _PAGE_PRESENT)) )
+        return false;
+
+    page = get_page_from_gfn(d, l1e_get_pfn(gl1e), NULL, P2M_ALLOC);
     if ( unlikely(!page) )
-        return 0;
+        return false;
 
-    okay = get_page_type(page, PGT_seg_desc_page);
-    if ( unlikely(!okay) )
+    if ( unlikely(!get_page_type(page, PGT_seg_desc_page)) )
     {
         put_page(page);
-        return 0;
+        return false;
     }
 
-    nl1e = l1e_from_page(page, l1e_get_flags(l1e) | _PAGE_RW);
+    pl1e = &gdt_ldt_ptes(d, v)[(offset >> PAGE_SHIFT) + 16];
+    l1e_add_flags(gl1e, _PAGE_RW);
 
     spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
-    l1e_write(&gdt_ldt_ptes(d, v)[off + 16], nl1e);
+    l1e_write(pl1e, gl1e);
     v->arch.pv_vcpu.shadow_ldt_mapcnt++;
     spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
 
-    return 1;
+    return true;
 }
 
 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index b93b3d1..f525fa2 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1112,7 +1112,7 @@ static int handle_gdt_ldt_mapping_fault(unsigned long offset,
     if ( likely(is_ldt_area) )
     {
         /* LDT fault: Copy a mapping from the guest's LDT, if it is valid. */
-        if ( likely(map_ldt_shadow_page(offset >> PAGE_SHIFT)) )
+        if ( likely(map_ldt_shadow_page(offset)) )
         {
             if ( guest_mode(regs) )
                 trace_trap_two_addr(TRC_PV_GDT_LDT_MAPPING_FAULT,
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 5760e05..ec7ce3c 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -553,7 +553,7 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
 int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void));
 int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
 
-int map_ldt_shadow_page(unsigned int);
+bool map_ldt_shadow_page(unsigned int);
 
 #define NIL(type) ((type *)-sizeof(type))
 #define IS_NIL(ptr) (!((uintptr_t)(ptr) + sizeof(*(ptr))))
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* Re: [PATCH v2 2/5] x86/pv: map_ldt_shadow_page() cleanup
  2017-08-29 15:57   ` [PATCH v2 " Andrew Cooper
@ 2017-08-30  8:21     ` Jan Beulich
  0 siblings, 0 replies; 21+ messages in thread
From: Jan Beulich @ 2017-08-30  8:21 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Wei Liu, Xen-devel

>>> On 29.08.17 at 17:57, <andrew.cooper3@citrix.com> wrote:
> Switch the return value from int to bool, to match its semantics.  Switch its
> parameter from a frame offset to a byte offset (simplifying the sole caller)
> and allowing for an extra sanity check that the fault is within the LDT 
> limit.
> 
> Drop the unnecessary gmfn and okay local variables, and correct the gva
> parameter to be named linear.  Rename l1e to gl1e, and simplify the
> construction of the new pte by simply taking (the now validated) gl1e and
> ensuring that _PAGE_RW is set.
> 
> Calculate the pte to be updated outside of the spinlock, which halves the size
> of the critical region.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Reviewed-by: Wei Liu <wei.liu2@citrix.com>
> ---
> CC: Jan Beulich <JBeulich@suse.com>
> CC: Wei Liu <wei.liu2@citrix.com>
> 
> v2:
>  * Adjustments to various comments.

Thanks!

Reviewed-by: Jan Beulich <jbeulich@suse.com>

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 21+ messages in thread

end of thread, other threads:[~2017-08-30  8:21 UTC | newest]

Thread overview: 21+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-08-29 11:19 [PATCH 0/5] x86: Misc cleanup and improvements Andrew Cooper
2017-08-29 11:19 ` [PATCH 1/5] x86/pv: Switch {fill, zap}_ro_mpt() to using mfn_t Andrew Cooper
2017-08-29 11:52   ` Tim Deegan
2017-08-29 12:01   ` George Dunlap
2017-08-29 12:17   ` Wei Liu
2017-08-29 14:00   ` Jan Beulich
2017-08-29 11:19 ` [PATCH 2/5] x86/pv: map_ldt_shadow_page() cleanup Andrew Cooper
2017-08-29 12:35   ` Wei Liu
2017-08-29 14:14   ` Jan Beulich
2017-08-29 14:54     ` Andrew Cooper
2017-08-29 15:57   ` [PATCH v2 " Andrew Cooper
2017-08-30  8:21     ` Jan Beulich
2017-08-29 11:19 ` [PATCH 3/5] x86/pv: Consistently use typesafe helpers in all files Andrew Cooper
2017-08-29 13:39   ` Wei Liu
2017-08-29 14:19     ` Jan Beulich
2017-08-29 11:19 ` [PATCH 4/5] x86/pv: Simplify access to the LDT/GDT ptes Andrew Cooper
2017-08-29 13:43   ` Wei Liu
2017-08-29 14:23   ` Jan Beulich
2017-08-29 11:19 ` [PATCH 5/5] x86/percpu: Misc cleanup Andrew Cooper
2017-08-29 13:44   ` Wei Liu
2017-08-29 14:24     ` Jan Beulich

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.