From: "Jan Beulich" <JBeulich@suse.com>
To: xen-devel <xen-devel@lists.xenproject.org>
Cc: Tim Deegan <tim@xen.org>
Subject: [PATCH 2/2] x86/shadow: avoid extra local array variable
Date: Thu, 10 Mar 2016 03:13:40 -0700
Message-ID: <56E156E402000078000DB2BE@prv-mh.provo.novell.com>
In-Reply-To: <56E1555002000078000DB293@prv-mh.provo.novell.com>

The local mfns[2] array existed only because struct sh_emulate_ctxt's
two separate MFN fields couldn't be handed to vmap() directly. Making
the structure fields an array avoids the extra copying.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
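
For reference, the net effect in sh_emulate_map_dest() (a minimal
sketch of the pattern, extracted from the hunks below):

    /* Old: two separate fields forced a local copy for vmap(). */
    mfn_t mfns[2];
    mfns[0] = sh_ctxt->mfn1;
    mfns[1] = sh_ctxt->mfn2;
    map = vmap(mfns, 2);

    /* New: the array field can be handed to vmap() directly. */
    map = vmap(sh_ctxt->mfn, 2);

The cross-page path is also restructured so that the PV rejection
becomes its own else-if branch, as the inner block no longer needs a
local declaration.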

--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1746,11 +1746,11 @@ void *sh_emulate_map_dest(struct vcpu *v
     struct domain *d = v->domain;
     void *map;
 
-    sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
-    if ( !mfn_valid(sh_ctxt->mfn1) )
-        return ((mfn_x(sh_ctxt->mfn1) == BAD_GVA_TO_GFN) ?
+    sh_ctxt->mfn[0] = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
+    if ( !mfn_valid(sh_ctxt->mfn[0]) )
+        return ((mfn_x(sh_ctxt->mfn[0]) == BAD_GVA_TO_GFN) ?
                 MAPPING_EXCEPTION :
-                (mfn_x(sh_ctxt->mfn1) == READONLY_GFN) ?
+                (mfn_x(sh_ctxt->mfn[0]) == READONLY_GFN) ?
                 MAPPING_SILENT_FAIL : MAPPING_UNHANDLEABLE);
 
 #ifndef NDEBUG
@@ -1767,39 +1767,36 @@ void *sh_emulate_map_dest(struct vcpu *v
 
     /* Unaligned writes mean probably this isn't a pagetable. */
     if ( vaddr & (bytes - 1) )
-        sh_remove_shadows(d, sh_ctxt->mfn1, 0, 0 /* Slow, can fail. */ );
+        sh_remove_shadows(d, sh_ctxt->mfn[0], 0, 0 /* Slow, can fail. */ );
 
     if ( likely(((vaddr + bytes - 1) & PAGE_MASK) == (vaddr & PAGE_MASK)) )
     {
         /* Whole write fits on a single page. */
-        sh_ctxt->mfn2 = _mfn(INVALID_MFN);
-        map = map_domain_page(sh_ctxt->mfn1) + (vaddr & ~PAGE_MASK);
+        sh_ctxt->mfn[1] = _mfn(INVALID_MFN);
+        map = map_domain_page(sh_ctxt->mfn[0]) + (vaddr & ~PAGE_MASK);
     }
-    else
+    else if ( !is_hvm_domain(d) )
     {
-        mfn_t mfns[2];
-
         /*
          * Cross-page emulated writes are only supported for HVM guests;
          * PV guests ought to know better.
          */
-        if ( !is_hvm_domain(d) )
-            return MAPPING_UNHANDLEABLE;
-
+        return MAPPING_UNHANDLEABLE;
+    }
+    else
+    {
         /* This write crosses a page boundary. Translate the second page. */
-        sh_ctxt->mfn2 = emulate_gva_to_mfn(v, vaddr + bytes, sh_ctxt);
-        if ( !mfn_valid(sh_ctxt->mfn2) )
-            return ((mfn_x(sh_ctxt->mfn2) == BAD_GVA_TO_GFN) ?
+        sh_ctxt->mfn[1] = emulate_gva_to_mfn(v, vaddr + bytes, sh_ctxt);
+        if ( !mfn_valid(sh_ctxt->mfn[1]) )
+            return ((mfn_x(sh_ctxt->mfn[1]) == BAD_GVA_TO_GFN) ?
                     MAPPING_EXCEPTION :
-                    (mfn_x(sh_ctxt->mfn2) == READONLY_GFN) ?
+                    (mfn_x(sh_ctxt->mfn[1]) == READONLY_GFN) ?
                     MAPPING_SILENT_FAIL : MAPPING_UNHANDLEABLE);
 
         /* Cross-page writes mean probably not a pagetable. */
-        sh_remove_shadows(d, sh_ctxt->mfn2, 0, 0 /* Slow, can fail. */ );
+        sh_remove_shadows(d, sh_ctxt->mfn[1], 0, 0 /* Slow, can fail. */ );
 
-        mfns[0] = sh_ctxt->mfn1;
-        mfns[1] = sh_ctxt->mfn2;
-        map = vmap(mfns, 2);
+        map = vmap(sh_ctxt->mfn, 2);
         if ( !map )
             return MAPPING_UNHANDLEABLE;
         map += (vaddr & ~PAGE_MASK);
@@ -1831,7 +1828,7 @@ void sh_emulate_unmap_dest(struct vcpu *
      *  - it was aligned to the PTE boundaries; and
      *  - _PAGE_PRESENT was clear before and after the write.
      */
-    shflags = mfn_to_page(sh_ctxt->mfn1)->shadow_flags;
+    shflags = mfn_to_page(sh_ctxt->mfn[0])->shadow_flags;
 #if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
     if ( sh_ctxt->low_bit_was_clear
          && !(*(u8 *)addr & _PAGE_PRESENT)
@@ -1852,12 +1849,12 @@ void sh_emulate_unmap_dest(struct vcpu *
               && bytes <= 4)) )
     {
         /* Writes with this alignment constraint can't possibly cross pages. */
-        ASSERT(!mfn_valid(sh_ctxt->mfn2));
+        ASSERT(!mfn_valid(sh_ctxt->mfn[1]));
     }
     else
 #endif /* SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY */
     {
-        if ( unlikely(mfn_valid(sh_ctxt->mfn2)) )
+        if ( unlikely(mfn_valid(sh_ctxt->mfn[1])) )
         {
             /* Validate as two writes, one to each page. */
             b1 = PAGE_SIZE - (((unsigned long)addr) & ~PAGE_MASK);
@@ -1865,16 +1862,16 @@ void sh_emulate_unmap_dest(struct vcpu *
             ASSERT(b2 < bytes);
         }
         if ( likely(b1 > 0) )
-            sh_validate_guest_pt_write(v, sh_ctxt->mfn1, addr, b1);
+            sh_validate_guest_pt_write(v, sh_ctxt->mfn[0], addr, b1);
         if ( unlikely(b2 > 0) )
-            sh_validate_guest_pt_write(v, sh_ctxt->mfn2, addr + b1, b2);
+            sh_validate_guest_pt_write(v, sh_ctxt->mfn[1], addr + b1, b2);
     }
 
-    paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn1));
+    paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn[0]));
 
-    if ( unlikely(mfn_valid(sh_ctxt->mfn2)) )
+    if ( unlikely(mfn_valid(sh_ctxt->mfn[1])) )
     {
-        paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn2));
+        paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn[1]));
         vunmap((void *)((unsigned long)addr & PAGE_MASK));
     }
     else
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -4615,13 +4615,13 @@ static void emulate_unmap_dest(struct vc
                                u32 bytes,
                                struct sh_emulate_ctxt *sh_ctxt)
 {
-    ASSERT(mfn_valid(sh_ctxt->mfn1));
+    ASSERT(mfn_valid(sh_ctxt->mfn[0]));
 
     /* If we are writing lots of PTE-aligned zeros, might want to unshadow */
     if ( likely(bytes >= 4) && (*(u32 *)addr == 0) )
     {
         if ( ((unsigned long) addr & ((sizeof (guest_intpte_t)) - 1)) == 0 )
-            check_for_early_unshadow(v, sh_ctxt->mfn1);
+            check_for_early_unshadow(v, sh_ctxt->mfn[0]);
         /* Don't reset the heuristic if we're writing zeros at non-aligned
          * addresses, otherwise it doesn't catch REP MOVSD on PAE guests */
     }
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -727,7 +727,7 @@ struct sh_emulate_ctxt {
     struct segment_register seg_reg[6];
 
     /* MFNs being written to in write/cmpxchg callbacks */
-    mfn_t mfn1, mfn2;
+    mfn_t mfn[2];
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
     /* Special case for avoiding having to verify writes: remember




