From: Jan Beulich <jbeulich@suse.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Wei Liu" <wl@xen.org>, "Roger Pau Monné" <roger.pau@citrix.com>,
	"George Dunlap" <george.dunlap@citrix.com>,
	"Tim Deegan" <tim@xen.org>
Subject: [PATCH 03/16] x86/shadow: drop redundant present bit checks from SHADOW_FOREACH_L<N>E() "bodys"
Date: Wed, 22 Mar 2023 10:31:02 +0100
Message-ID: <54e7a146-6f86-95a7-7b3b-0ba5dd418949@suse.com>
In-Reply-To: <dd9205b8-63f0-b1bc-f2b8-50d5da2bf2a7@suse.com>

SHADOW_FOREACH_L<N>E() already invokes the "body" only when the present
bit is set; no need to re-do the check.
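
For reference, a simplified sketch of the L1 variant's shape (an
illustration only, not the exact definition; the real macros in
multi.c additionally assert the shadow type, advance a guest-entry
pointer, and have level-specific variants):

    #define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)   \
    do {                                                              \
        shadow_l1e_t *_sp = map_domain_page(_sl1mfn);                 \
        int _i;                                                       \
                                                                      \
        for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ )        \
        {                                                             \
            (_sl1e) = _sp + _i;                                       \
            /* The "body" runs for present entries only, ... */       \
            if ( shadow_l1e_get_flags(*(_sl1e)) & _PAGE_PRESENT )     \
            {                                                         \
                _code                                                 \
            }                                                         \
            /* ... so re-checking _PAGE_PRESENT inside it is          \
               redundant. */                                          \
            if ( _done )                                              \
                break;                                                \
        }                                                             \
        unmap_domain_page(_sp);                                       \
    } while ( 0 )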

While there, also
- stop open-coding mfn_to_maddr() in code being touched (re-indented)
  anyway,
- stop open-coding mfn_eq() in code being touched or adjacent code (a
  sketch of both helpers follows this list),
- drop local variables when they're no longer used at least twice.
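
The two helpers named above are the usual type-safe wrappers; as a
reminder of their effect only (see xen/include/xen/mm.h for the
authoritative definitions):

    /* Machine frame number -> machine (physical) address. */
    static inline paddr_t mfn_to_maddr(mfn_t mfn)
    {
        return (paddr_t)mfn_x(mfn) << PAGE_SHIFT;
    }

    /* Type-safe MFN comparison, avoiding open-coded mfn_x() pairs. */
    static inline bool mfn_eq(mfn_t x, mfn_t y)
    {
        return mfn_x(x) == mfn_x(y);
    }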

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1289,12 +1289,8 @@ void sh_destroy_l4_shadow(struct domain
     /* Decrement refcounts of all the old entries */
     sl4mfn = smfn;
     SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, d, {
-        if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT )
-        {
-            sh_put_ref(d, shadow_l4e_get_mfn(*sl4e),
-                       (((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT)
-                       | ((unsigned long)sl4e & ~PAGE_MASK));
-        }
+        sh_put_ref(d, shadow_l4e_get_mfn(*sl4e),
+                   mfn_to_maddr(sl4mfn) | ((unsigned long)sl4e & ~PAGE_MASK));
     });
 
     /* Put the memory back in the pool */
@@ -1320,10 +1316,8 @@ void sh_destroy_l3_shadow(struct domain
     /* Decrement refcounts of all the old entries */
     sl3mfn = smfn;
     SHADOW_FOREACH_L3E(sl3mfn, sl3e, 0, 0, {
-        if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT )
-            sh_put_ref(d, shadow_l3e_get_mfn(*sl3e),
-                        (((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
-                        | ((unsigned long)sl3e & ~PAGE_MASK));
+        sh_put_ref(d, shadow_l3e_get_mfn(*sl3e),
+                   mfn_to_maddr(sl3mfn) | ((unsigned long)sl3e & ~PAGE_MASK));
     });
 
     /* Put the memory back in the pool */
@@ -1352,10 +1346,8 @@ void sh_destroy_l2_shadow(struct domain
     /* Decrement refcounts of all the old entries */
     sl2mfn = smfn;
     SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, {
-        if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT )
-            sh_put_ref(d, shadow_l2e_get_mfn(*sl2e),
-                        (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
-                        | ((unsigned long)sl2e & ~PAGE_MASK));
+        sh_put_ref(d, shadow_l2e_get_mfn(*sl2e),
+                   mfn_to_maddr(sl2mfn) | ((unsigned long)sl2e & ~PAGE_MASK));
     });
 
     /* Put the memory back in the pool */
@@ -1390,11 +1382,10 @@ void sh_destroy_l1_shadow(struct domain
         /* Decrement refcounts of all the old entries */
         mfn_t sl1mfn = smfn;
         SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, 0, {
-            unsigned int sl1f = shadow_l1e_get_flags(*sl1e);
-
-            if ( (sl1f & _PAGE_PRESENT) && !sh_l1e_is_magic(*sl1e) )
+            if ( !sh_l1e_is_magic(*sl1e) )
             {
-                shadow_vram_put_mfn(shadow_l1e_get_mfn(*sl1e), sl1f,
+                shadow_vram_put_mfn(shadow_l1e_get_mfn(*sl1e),
+                                    shadow_l1e_get_flags(*sl1e),
                                     sl1mfn, sl1e, d);
                 shadow_put_page_from_l1e(*sl1e, d);
             }
@@ -3559,7 +3550,6 @@ int cf_check sh_rm_write_access_from_l1(
 {
     shadow_l1e_t *sl1e;
     int done = 0;
-    int flags;
 #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
     struct vcpu *curr = current;
     mfn_t base_sl1mfn = sl1mfn; /* Because sl1mfn changes in the foreach */
@@ -3567,10 +3557,8 @@ int cf_check sh_rm_write_access_from_l1(
 
     SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done,
     {
-        flags = shadow_l1e_get_flags(*sl1e);
-        if ( (flags & _PAGE_PRESENT)
-             && (flags & _PAGE_RW)
-             && (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(readonly_mfn)) )
+        if ( (shadow_l1e_get_flags(*sl1e) & _PAGE_RW) &&
+             mfn_eq(shadow_l1e_get_mfn(*sl1e), readonly_mfn) )
         {
             shadow_l1e_t ro_sl1e = shadow_l1e_remove_flags(*sl1e, _PAGE_RW);
 
@@ -3596,13 +3584,10 @@ int cf_check sh_rm_mappings_from_l1(
 {
     shadow_l1e_t *sl1e;
     int done = 0;
-    int flags;
 
     SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done,
     {
-        flags = shadow_l1e_get_flags(*sl1e);
-        if ( (flags & _PAGE_PRESENT)
-             && (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(target_mfn)) )
+        if ( mfn_eq(shadow_l1e_get_mfn(*sl1e), target_mfn) )
         {
             shadow_set_l1e(d, sl1e, shadow_l1e_empty(), p2m_invalid, sl1mfn);
             if ( sh_check_page_has_no_refs(mfn_to_page(target_mfn)) )
@@ -3647,13 +3632,10 @@ int cf_check sh_remove_l1_shadow(struct
 {
     shadow_l2e_t *sl2e;
     int done = 0;
-    int flags;
 
     SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, d,
     {
-        flags = shadow_l2e_get_flags(*sl2e);
-        if ( (flags & _PAGE_PRESENT)
-             && (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) )
+        if ( mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn) )
         {
             shadow_set_l2e(d, sl2e, shadow_l2e_empty(), sl2mfn);
             if ( mfn_to_page(sl1mfn)->u.sh.type == 0 )
@@ -3670,13 +3652,10 @@ int cf_check sh_remove_l2_shadow(struct
 {
     shadow_l3e_t *sl3e;
     int done = 0;
-    int flags;
 
     SHADOW_FOREACH_L3E(sl3mfn, sl3e, 0, done,
     {
-        flags = shadow_l3e_get_flags(*sl3e);
-        if ( (flags & _PAGE_PRESENT)
-             && (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) )
+        if ( mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn) )
         {
             shadow_set_l3e(d, sl3e, shadow_l3e_empty(), sl3mfn);
             if ( mfn_to_page(sl2mfn)->u.sh.type == 0 )
@@ -3692,13 +3671,10 @@ int cf_check sh_remove_l3_shadow(struct
 {
     shadow_l4e_t *sl4e;
     int done = 0;
-    int flags;
 
     SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, done, d,
     {
-        flags = shadow_l4e_get_flags(*sl4e);
-        if ( (flags & _PAGE_PRESENT)
-             && (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) )
+        if ( mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn) )
         {
             shadow_set_l4e(d, sl4e, shadow_l4e_empty(), sl4mfn);
             if ( mfn_to_page(sl3mfn)->u.sh.type == 0 )


