* [PATCH 1/2] x86/mm: Plumb an error return through {paging, hap, shadow}_vcpu_init()
@ 2017-01-30 17:26 Andrew Cooper
From: Andrew Cooper @ 2017-01-30 17:26 UTC
  To: Xen-devel; +Cc: George Dunlap, Andrew Cooper, Tim Deegan, Jan Beulich

No functional change yet, but required for subsequent changes.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: George Dunlap <george.dunlap@eu.citrix.com>
---
 xen/arch/x86/domain.c           | 3 ++-
 xen/arch/x86/mm/hap/hap.c       | 3 ++-
 xen/arch/x86/mm/paging.c        | 6 +++---
 xen/arch/x86/mm/shadow/common.c | 3 ++-
 xen/arch/x86/mm/shadow/none.c   | 3 ++-
 xen/include/asm-x86/hap.h       | 2 +-
 xen/include/asm-x86/paging.h    | 2 +-
 xen/include/asm-x86/shadow.h    | 2 +-
 8 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 71c0e3c..4edc4c8 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -384,7 +384,8 @@ int vcpu_initialise(struct vcpu *v)
 
     if ( !is_idle_domain(d) )
     {
-        paging_vcpu_init(v);
+        if ( (rc = paging_vcpu_init(v)) )
+            return rc;
 
         if ( (rc = vcpu_init_fpu(v)) != 0 )
             return rc;
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 6dbb3cc..2d52dbd 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -641,10 +641,11 @@ static const struct paging_mode hap_paging_protected_mode;
 static const struct paging_mode hap_paging_pae_mode;
 static const struct paging_mode hap_paging_long_mode;
 
-void hap_vcpu_init(struct vcpu *v)
+int hap_vcpu_init(struct vcpu *v)
 {
     v->arch.paging.mode = &hap_paging_real_mode;
     v->arch.paging.nestedmode = &hap_paging_real_mode;
+    return 0;
 }
 
 /************************************************/
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index d964ed5..bd4f469 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -673,12 +673,12 @@ int paging_domain_init(struct domain *d, unsigned int domcr_flags)
 }
 
 /* vcpu paging struct initialization goes here */
-void paging_vcpu_init(struct vcpu *v)
+int paging_vcpu_init(struct vcpu *v)
 {
     if ( hap_enabled(v->domain) )
-        hap_vcpu_init(v);
+        return hap_vcpu_init(v);
     else
-        shadow_vcpu_init(v);
+        return shadow_vcpu_init(v);
 }
 
 
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index a619d65..aa0b8f0 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -81,7 +81,7 @@ int shadow_domain_init(struct domain *d, unsigned int domcr_flags)
  * matter to have v->arch.paging.mode pointing to any mode, as long as it can
  * be compiled.
  */
-void shadow_vcpu_init(struct vcpu *v)
+int shadow_vcpu_init(struct vcpu *v)
 {
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     int i, j;
@@ -98,6 +98,7 @@ void shadow_vcpu_init(struct vcpu *v)
     v->arch.paging.mode = is_pv_vcpu(v) ?
                           &SHADOW_INTERNAL_NAME(sh_paging_mode, 4) :
                           &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
+    return 0;
 }
 
 #if SHADOW_AUDIT
diff --git a/xen/arch/x86/mm/shadow/none.c b/xen/arch/x86/mm/shadow/none.c
index 69e56c5..81ab42a 100644
--- a/xen/arch/x86/mm/shadow/none.c
+++ b/xen/arch/x86/mm/shadow/none.c
@@ -71,8 +71,9 @@ static const struct paging_mode sh_paging_none = {
     .write_p2m_entry               = _write_p2m_entry,
 };
 
-void shadow_vcpu_init(struct vcpu *v)
+int shadow_vcpu_init(struct vcpu *v)
 {
     ASSERT(is_pv_vcpu(v));
     v->arch.paging.mode = &sh_paging_none;
+    return 0;
 }
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index 88587c4..ca55328 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -39,7 +39,7 @@ int   hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
 int   hap_enable(struct domain *d, u32 mode);
 void  hap_final_teardown(struct domain *d);
 void  hap_teardown(struct domain *d, bool *preempted);
-void  hap_vcpu_init(struct vcpu *v);
+int   hap_vcpu_init(struct vcpu *v);
 int   hap_track_dirty_vram(struct domain *d,
                            unsigned long begin_pfn,
                            unsigned long nr,
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index cec6bfd..4857871 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -197,7 +197,7 @@ struct sh_dirty_vram {
 
 /* Initialize the paging resource for vcpu struct. It is called by
  * vcpu_initialise() in domain.c */
-void paging_vcpu_init(struct vcpu *v);
+int paging_vcpu_init(struct vcpu *v);
 
 /* Set up the paging-assistance-specific parts of a domain struct at
  * start of day.  Called for every domain from arch_domain_create() */
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 7e1ed3b..da83188 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -52,7 +52,7 @@ int shadow_domain_init(struct domain *d, unsigned int domcr_flags);
 
 /* Setup the shadow-specific parts of a vcpu struct. It is called by
  * paging_vcpu_init() in paging.c */
-void shadow_vcpu_init(struct vcpu *v);
+int shadow_vcpu_init(struct vcpu *v);
 
 #ifdef CONFIG_SHADOW_PAGING
 
-- 
2.1.4



* [PATCH 2/2] x86/shadow: Move shadow pagetable fields into struct shadow_vcpu
From: Andrew Cooper @ 2017-01-30 17:26 UTC
  To: Xen-devel; +Cc: George Dunlap, Andrew Cooper, Tim Deegan, Jan Beulich

The vTLB and last_write* booleans are used exclusively by the shadow pagetable
code.  Move them from paging_vcpu to shadow_vcpu, which causes them to be
entirely omitted on a build without shadow paging support.

While changing the qualified names of these variables, drop an unnessary NULL
check before freeing the vTLB, and move allocation of the vTLB from
sh_update_paging_modes() to shadow_vcpu_init() where it more logically
belongs.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: George Dunlap <george.dunlap@eu.citrix.com>
---
 xen/arch/x86/mm/shadow/common.c  | 35 ++++++++++++-----------------------
 xen/arch/x86/mm/shadow/multi.c   | 22 +++++++++++-----------
 xen/arch/x86/mm/shadow/private.h | 24 ++++++++++++------------
 xen/include/asm-x86/domain.h     | 18 ++++++++++--------
 4 files changed, 45 insertions(+), 54 deletions(-)

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index aa0b8f0..1075d56 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -95,6 +95,14 @@ int shadow_vcpu_init(struct vcpu *v)
     }
 #endif
 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
+    /* Allocate a virtual TLB for this vcpu. */
+    v->arch.paging.shadow.vtlb = xzalloc_array(struct shadow_vtlb, VTLB_ENTRIES);
+    if ( !v->arch.paging.shadow.vtlb )
+        return -ENOMEM;
+    spin_lock_init(&v->arch.paging.shadow.vtlb_lock);
+#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
+
     v->arch.paging.mode = is_pv_vcpu(v) ?
                           &SHADOW_INTERNAL_NAME(sh_paging_mode, 4) :
                           &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
@@ -1459,7 +1467,7 @@ void shadow_free(struct domain *d, mfn_t smfn)
                 v->arch.paging.shadow.last_writeable_pte_smfn = 0;
 #endif
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
-            v->arch.paging.last_write_emul_ok = 0;
+            v->arch.paging.shadow.last_write_emul_ok = 0;
 #endif
         }
 #endif
@@ -1680,7 +1688,7 @@ static mfn_t emulate_gva_to_mfn(struct vcpu *v, unsigned long vaddr,
     mfn = page_to_mfn(page);
     ASSERT(mfn_valid(mfn));
 
-    v->arch.paging.last_write_was_pt = !!sh_mfn_is_a_page_table(mfn);
+    v->arch.paging.shadow.last_write_was_pt = !!sh_mfn_is_a_page_table(mfn);
     /*
      * Note shadow cannot page out or unshare this mfn, so the map won't
      * disappear. Otherwise, caller must hold onto page until done.
@@ -2864,22 +2872,6 @@ static void sh_update_paging_modes(struct vcpu *v)
 
     ASSERT(paging_locked_by_me(d));
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
-    /* Make sure this vcpu has a virtual TLB array allocated */
-    if ( unlikely(!v->arch.paging.vtlb) )
-    {
-        v->arch.paging.vtlb = xzalloc_array(struct shadow_vtlb, VTLB_ENTRIES);
-        if ( unlikely(!v->arch.paging.vtlb) )
-        {
-            SHADOW_ERROR("Could not allocate vTLB space for dom %u vcpu %u\n",
-                         d->domain_id, v->vcpu_id);
-            domain_crash(v->domain);
-            return;
-        }
-        spin_lock_init(&v->arch.paging.vtlb_lock);
-    }
-#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
-
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     if ( mfn_eq(v->arch.paging.shadow.oos_snapshot[0], INVALID_MFN) )
     {
@@ -3206,11 +3198,8 @@ void shadow_teardown(struct domain *d, bool *preempted)
     for_each_vcpu(d, v)
     {
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
-        if ( v->arch.paging.vtlb )
-        {
-            xfree(v->arch.paging.vtlb);
-            v->arch.paging.vtlb = NULL;
-        }
+        xfree(v->arch.paging.shadow.vtlb);
+        v->arch.paging.shadow.vtlb = NULL;
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index d4090d7..20db60f 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2881,7 +2881,7 @@ static int sh_page_fault(struct vcpu *v,
      * it's highly likely to reach same emulation action for this frame.
      * Then try to emulate early to avoid lock aquisition.
      */
-    if ( v->arch.paging.last_write_emul_ok
+    if ( v->arch.paging.shadow.last_write_emul_ok
          && v->arch.paging.shadow.last_emulated_frame == (va >> PAGE_SHIFT) )
     {
         /* check whether error code is 3, or else fall back to normal path
@@ -2898,7 +2898,7 @@ static int sh_page_fault(struct vcpu *v,
             if ( mfn_valid(gmfn) && mfn_is_out_of_sync(gmfn) )
             {
                 fast_emul = 0;
-                v->arch.paging.last_write_emul_ok = 0;
+                v->arch.paging.shadow.last_write_emul_ok = 0;
                 goto page_fault_slow_path;
             }
 #endif /* OOS */
@@ -2907,7 +2907,7 @@ static int sh_page_fault(struct vcpu *v,
             goto early_emulation;
         }
         else
-            v->arch.paging.last_write_emul_ok = 0;
+            v->arch.paging.shadow.last_write_emul_ok = 0;
     }
 #endif
 
@@ -3344,7 +3344,7 @@ static int sh_page_fault(struct vcpu *v,
             if ( fast_emul )
             {
                 perfc_incr(shadow_fault_fast_emulate_fail);
-                v->arch.paging.last_write_emul_ok = 0;
+                v->arch.paging.shadow.last_write_emul_ok = 0;
             }
 #endif
             gdprintk(XENLOG_DEBUG, "write to pagetable during event "
@@ -3399,7 +3399,7 @@ static int sh_page_fault(struct vcpu *v,
         if ( fast_emul )
         {
             perfc_incr(shadow_fault_fast_emulate_fail);
-            v->arch.paging.last_write_emul_ok = 0;
+            v->arch.paging.shadow.last_write_emul_ok = 0;
         }
 #endif
         SHADOW_PRINTK("emulator failure, unshadowing mfn %#lx\n",
@@ -3429,11 +3429,11 @@ static int sh_page_fault(struct vcpu *v,
         {
             v->arch.paging.shadow.last_emulated_frame = va >> PAGE_SHIFT;
             v->arch.paging.shadow.last_emulated_mfn = mfn_x(gmfn);
-            v->arch.paging.last_write_emul_ok = 1;
+            v->arch.paging.shadow.last_write_emul_ok = 1;
         }
     }
     else if ( fast_emul )
-        v->arch.paging.last_write_emul_ok = 0;
+        v->arch.paging.shadow.last_write_emul_ok = 0;
 #endif
 
     if ( emul_ctxt.ctxt.retire.singlestep )
@@ -3452,7 +3452,7 @@ static int sh_page_fault(struct vcpu *v,
         for ( i = 0 ; i < 4 ; i++ )
         {
             shadow_continue_emulation(&emul_ctxt, regs);
-            v->arch.paging.last_write_was_pt = 0;
+            v->arch.paging.shadow.last_write_was_pt = 0;
             r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
 
             /*
@@ -3463,7 +3463,7 @@ static int sh_page_fault(struct vcpu *v,
             if ( r == X86EMUL_OKAY && !emul_ctxt.ctxt.retire.raw )
             {
                 emulation_count++;
-                if ( v->arch.paging.last_write_was_pt )
+                if ( v->arch.paging.shadow.last_write_was_pt )
                 {
                     perfc_incr(shadow_em_ex_pt);
                     TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EMULATION_2ND_PT_WRITTEN);
@@ -3539,7 +3539,7 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
 #endif
 
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
-    v->arch.paging.last_write_emul_ok = 0;
+    v->arch.paging.shadow.last_write_emul_ok = 0;
 #endif
 
     /* First check that we can safely read the shadow l2e.  SMP/PAE linux can
@@ -4232,7 +4232,7 @@ sh_update_cr3(struct vcpu *v, int do_locking)
 #endif
 
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
-    v->arch.paging.last_write_emul_ok = 0;
+    v->arch.paging.shadow.last_write_emul_ok = 0;
 #endif
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index f0b0ed4..5649e81 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -768,9 +768,9 @@ struct shadow_vtlb {
 /* Call whenever the guest flushes hit actual TLB */
 static inline void vtlb_flush(struct vcpu *v)
 {
-    spin_lock(&v->arch.paging.vtlb_lock);
-    memset(v->arch.paging.vtlb, 0, VTLB_ENTRIES * sizeof (struct shadow_vtlb));
-    spin_unlock(&v->arch.paging.vtlb_lock);
+    spin_lock(&v->arch.paging.shadow.vtlb_lock);
+    memset(v->arch.paging.shadow.vtlb, 0, VTLB_ENTRIES * sizeof(struct shadow_vtlb));
+    spin_unlock(&v->arch.paging.shadow.vtlb_lock);
 }
 
 static inline int vtlb_hash(unsigned long page_number)
@@ -784,9 +784,9 @@ static inline void vtlb_insert(struct vcpu *v, unsigned long page,
 {
     struct shadow_vtlb entry =
         { .page_number = page, .frame_number = frame, .pfec = pfec };
-    spin_lock(&v->arch.paging.vtlb_lock);
-    v->arch.paging.vtlb[vtlb_hash(page)] = entry;
-    spin_unlock(&v->arch.paging.vtlb_lock);
+    spin_lock(&v->arch.paging.shadow.vtlb_lock);
+    v->arch.paging.shadow.vtlb[vtlb_hash(page)] = entry;
+    spin_unlock(&v->arch.paging.shadow.vtlb_lock);
 }
 
 /* Look a translation up in the vTLB.  Returns INVALID_GFN if not found. */
@@ -797,15 +797,15 @@ static inline unsigned long vtlb_lookup(struct vcpu *v,
     unsigned long frame_number = gfn_x(INVALID_GFN);
     int i = vtlb_hash(page_number);
 
-    spin_lock(&v->arch.paging.vtlb_lock);
-    if ( v->arch.paging.vtlb[i].pfec != 0
-         && v->arch.paging.vtlb[i].page_number == page_number
+    spin_lock(&v->arch.paging.shadow.vtlb_lock);
+    if ( v->arch.paging.shadow.vtlb[i].pfec != 0
+         && v->arch.paging.shadow.vtlb[i].page_number == page_number
          /* Any successful walk that had at least these pfec bits is OK */
-         && (v->arch.paging.vtlb[i].pfec & pfec) == pfec )
+         && (v->arch.paging.shadow.vtlb[i].pfec & pfec) == pfec )
     {
-        frame_number = v->arch.paging.vtlb[i].frame_number;
+        frame_number = v->arch.paging.shadow.vtlb[i].frame_number;
     }
-    spin_unlock(&v->arch.paging.vtlb_lock);
+    spin_unlock(&v->arch.paging.shadow.vtlb_lock);
     return frame_number;
 }
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index e6c7e13..3e7d791 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -147,7 +147,16 @@ struct shadow_vcpu {
         unsigned long off[SHADOW_OOS_FIXUPS];
     } oos_fixup[SHADOW_OOS_PAGES];
 
-    bool_t pagetable_dying;
+    /* Translated guest: virtual TLB */
+    struct shadow_vtlb *vtlb;
+    spinlock_t          vtlb_lock;
+
+    /* HVM guest: last emulate was to a pagetable */
+    bool last_write_was_pt;
+    /* HVM guest: last write emulation succeeds */
+    bool last_write_emul_ok;
+
+    bool pagetable_dying;
 #endif
 };
 
@@ -222,13 +231,6 @@ struct paging_vcpu {
     const struct paging_mode *mode;
     /* Nested Virtualization: paging mode of nested guest */
     const struct paging_mode *nestedmode;
-    /* HVM guest: last emulate was to a pagetable */
-    unsigned int last_write_was_pt:1;
-    /* HVM guest: last write emulation succeeds */
-    unsigned int last_write_emul_ok:1;
-    /* Translated guest: virtual TLB */
-    struct shadow_vtlb *vtlb;
-    spinlock_t          vtlb_lock;
 
     /* paging support extension */
     struct shadow_vcpu shadow;
-- 
2.1.4



* Re: [PATCH 1/2] x86/mm: Plumb an error return through {paging, hap, shadow}_vcpu_init()
From: George Dunlap @ 2017-01-30 17:48 UTC
  To: Andrew Cooper, Xen-devel; +Cc: George Dunlap, Tim Deegan, Jan Beulich

On 30/01/17 17:26, Andrew Cooper wrote:
> No functional change yet, but required for subsequent changes.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Acked-by: George Dunlap <george.dunlap@citrix.com>


* Re: [PATCH 1/2] x86/mm: Plumb an error return through {paging, hap, shadow}_vcpu_init()
From: Jan Beulich @ 2017-01-31 10:27 UTC
  To: Andrew Cooper; +Cc: George Dunlap, Tim Deegan, Xen-devel

>>> On 30.01.17 at 18:26, <andrew.cooper3@citrix.com> wrote:
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -384,7 +384,8 @@ int vcpu_initialise(struct vcpu *v)
>  
>      if ( !is_idle_domain(d) )
>      {
> -        paging_vcpu_init(v);
> +        if ( (rc = paging_vcpu_init(v)) )
> +            return rc;
>  
>          if ( (rc = vcpu_init_fpu(v)) != 0 )
>              return rc;

With the function so far having been impossible to fail, it was also
pretty clear that no meaningful cleanup would need doing after it.
Now that it can fail, is it really appropriate to return without
undoing its effects if vcpu_init_fpu() fails?
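
For illustration, the unwind being asked about might take this shape (a
sketch only; paging_vcpu_teardown() is a hypothetical helper, nothing by
that name exists at this point):

    if ( (rc = paging_vcpu_init(v)) )
        return rc;

    if ( (rc = vcpu_init_fpu(v)) != 0 )
    {
        /* Hypothetical: undo whatever paging_vcpu_init() set up. */
        paging_vcpu_teardown(v);
        return rc;
    }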

Jan



* Re: [PATCH 2/2] x86/shadow: Move shadow pagetable fields into struct shadow_vcpu
From: Jan Beulich @ 2017-01-31 10:35 UTC
  To: Andrew Cooper; +Cc: George Dunlap, Tim Deegan, Xen-devel

>>> On 30.01.17 at 18:26, <andrew.cooper3@citrix.com> wrote:
> @@ -1680,7 +1688,7 @@ static mfn_t emulate_gva_to_mfn(struct vcpu *v, unsigned long vaddr,
>      mfn = page_to_mfn(page);
>      ASSERT(mfn_valid(mfn));
>  
> -    v->arch.paging.last_write_was_pt = !!sh_mfn_is_a_page_table(mfn);
> +    v->arch.paging.shadow.last_write_was_pt = !!sh_mfn_is_a_page_table(mfn);

Please drop the now unnecessary !! (and use true/false instead of
1/0 in other places).
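
With the field becoming a plain bool in this patch the conversion is
implicit, i.e. the assignment can simply read:

    v->arch.paging.shadow.last_write_was_pt = sh_mfn_is_a_page_table(mfn);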

> @@ -797,15 +797,15 @@ static inline unsigned long vtlb_lookup(struct vcpu *v,
>      unsigned long frame_number = gfn_x(INVALID_GFN);
>      int i = vtlb_hash(page_number);
>  
> -    spin_lock(&v->arch.paging.vtlb_lock);
> -    if ( v->arch.paging.vtlb[i].pfec != 0
> -         && v->arch.paging.vtlb[i].page_number == page_number
> +    spin_lock(&v->arch.paging.shadow.vtlb_lock);
> +    if ( v->arch.paging.shadow.vtlb[i].pfec != 0
> +         && v->arch.paging.shadow.vtlb[i].page_number == page_number
>           /* Any successful walk that had at least these pfec bits is OK */
> -         && (v->arch.paging.vtlb[i].pfec & pfec) == pfec )
> +         && (v->arch.paging.shadow.vtlb[i].pfec & pfec) == pfec )
>      {
> -        frame_number = v->arch.paging.vtlb[i].frame_number;
> +        frame_number = v->arch.paging.shadow.vtlb[i].frame_number;
>      }
> -    spin_unlock(&v->arch.paging.vtlb_lock);
> +    spin_unlock(&v->arch.paging.shadow.vtlb_lock);
>      return frame_number;
>  }

Since you need to touch all these vtlb_lock accessing lines anyway,
would it perhaps make sense to switch to an r/w lock at once?
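
As a sketch of what that might look like (assuming vtlb_lock were changed
to an rwlock_t and initialised with rwlock_init()), the read-only lookup
would take the lock shared:

    read_lock(&v->arch.paging.shadow.vtlb_lock);
    /* read-only probe of the hashed vTLB slot */
    read_unlock(&v->arch.paging.shadow.vtlb_lock);

while vtlb_insert() and vtlb_flush() would use write_lock() /
write_unlock(), so concurrent lookups no longer serialise against each
other.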

> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -147,7 +147,16 @@ struct shadow_vcpu {
>          unsigned long off[SHADOW_OOS_FIXUPS];
>      } oos_fixup[SHADOW_OOS_PAGES];
>  
> -    bool_t pagetable_dying;
> +    /* Translated guest: virtual TLB */
> +    struct shadow_vtlb *vtlb;
> +    spinlock_t          vtlb_lock;
> +
> +    /* HVM guest: last emulate was to a pagetable */
> +    bool last_write_was_pt;
> +    /* HVM guest: last write emulation succeeds */
> +    bool last_write_emul_ok;
> +
> +    bool pagetable_dying;

Perhaps make all of these bools bit fields?
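
I.e. something like (sketch):

    bool last_write_was_pt:1;
    bool last_write_emul_ok:1;
    bool pagetable_dying:1;

which would let the three flags share a single byte.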

Jan



* Re: [PATCH 1/2] x86/mm: Plumb an error return through {paging, hap, shadow}_vcpu_init()
From: Tim Deegan @ 2017-02-08 10:14 UTC
  To: Andrew Cooper; +Cc: George Dunlap, Jan Beulich, Xen-devel

At 17:26 +0000 on 30 Jan (1485797161), Andrew Cooper wrote:
> No functional change yet, but required for subsequent changes.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>


* Re: [PATCH 2/2] x86/shadow: Move shadow pagetable fields into struct shadow_vcpu
From: Tim Deegan @ 2017-02-08 10:20 UTC
  To: Andrew Cooper; +Cc: George Dunlap, Jan Beulich, Xen-devel

At 17:26 +0000 on 30 Jan (1485797162), Andrew Cooper wrote:
> The vTLB and last_write* booleans are used exclusively by the shadow pagetable
> code.  Move them from paging_vcpu to shadow_vcpu, which causes them to be
> entirely omitted on a build without shadow paging support.
> 
> While changing the qualified names of these variables, drop an unnecessary NULL
> check before freeing the vTLB, and move allocation of the vTLB from
> sh_update_paging_modes() to shadow_vcpu_init() where it more logically
> belongs.

In future, can you please separate incidental cleanups from the
mechanical renaming parts?  It makes patch review much easier.

> +#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
> +    /* Allocate a virtual TLB for this vcpu. */
> +    v->arch.paging.shadow.vtlb = xzalloc_array(struct shadow_vtlb, VTLB_ENTRIES);
> +    if ( !v->arch.paging.shadow.vtlb )
> +        return -ENOMEM;
> +    spin_lock_init(&v->arch.paging.shadow.vtlb_lock);
> +#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */

As Jan points out, the higher-level error path can now leak this array.
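
Concretely: if vcpu_init_fpu() fails after shadow_vcpu_init() has run,
nothing frees the newly allocated array, which is otherwise only released
in shadow_teardown().  One way out would be a teardown helper paired with
the init (hypothetical sketch):

    void shadow_vcpu_teardown(struct vcpu *v)
    {
    #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
        xfree(v->arch.paging.shadow.vtlb);
        v->arch.paging.shadow.vtlb = NULL;
    #endif
    }

called from the vcpu_initialise() error path.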

Apart from that, this looks good to me.

Cheers,

Tim.

