From: Jan Beulich <jbeulich@suse.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Wei Liu" <wl@xen.org>, "Roger Pau Monné" <roger.pau@citrix.com>,
	"George Dunlap" <george.dunlap@citrix.com>,
	"Tim Deegan" <tim@xen.org>
Subject: [PATCH v2 11/12] x86/p2m: write_p2m_entry_{pre,post} hooks are HVM-only
Date: Mon, 12 Apr 2021 16:13:52 +0200
Message-ID: <507182a8-38c3-cda3-d921-7494a5df63a7@suse.com>
In-Reply-To: <3cf73378-b9d6-0eca-12b6-0f628518bebf@suse.com>

Move the respective shadow code to the shadow subsystem's HVM-only source
file, thus making it possible to exclude the hooks from !HVM builds as
well.  This in turn shows that shadow_p2m_init() isn't needed in !HVM
builds either.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: New.
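
As background for reviewers less familiar with these hooks, below is a
small, self-contained illustration of the pattern this change enables.
This is not Xen code: CONFIG_HVM, write_p2m_entry_pre/post,
shadow_p2m_init() and the call site merely mirror the real names, with
types and logic heavily simplified.  The point is that the hook pointers
exist, get installed, and get invoked only in HVM builds, so a !HVM
build needs neither the struct fields nor shadow_p2m_init().

/* Illustrative model only -- not Xen code. */
#include <stdio.h>

#define CONFIG_HVM 1                 /* flip to 0 to model a !HVM build */

struct p2m_domain {
#if CONFIG_HVM
    /* Hooks that only HVM-only code ever installs or calls. */
    void (*write_p2m_entry_pre)(unsigned long gfn);
    void (*write_p2m_entry_post)(unsigned int oflags);
#endif
    unsigned int default_access;     /* stand-in for the unrelated members */
};

#if CONFIG_HVM
static void sh_pre(unsigned long gfn)    { printf("pre hook, gfn %lu\n", gfn); }
static void sh_post(unsigned int oflags) { printf("post hook, oflags %#x\n", oflags); }

static void shadow_p2m_init(struct p2m_domain *p2m)
{
    p2m->write_p2m_entry_pre  = sh_pre;
    p2m->write_p2m_entry_post = sh_post;
}
#else
/* With the hooks compiled out, no shadow_p2m_init() is needed at all. */
static void shadow_p2m_init(struct p2m_domain *p2m) { (void)p2m; }
#endif

static void write_entry(struct p2m_domain *p2m, unsigned long gfn)
{
    (void)p2m;
    (void)gfn;
#if CONFIG_HVM
    if ( p2m->write_p2m_entry_pre )
        p2m->write_p2m_entry_pre(gfn);
#endif
    /* ... the actual entry write would happen here ... */
#if CONFIG_HVM
    if ( p2m->write_p2m_entry_post )
        p2m->write_p2m_entry_post(0);
#endif
}

int main(void)
{
    struct p2m_domain p2m = { 0 };

    shadow_p2m_init(&p2m);
    write_entry(&p2m, 42);
    return 0;
}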

--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2327,22 +2327,6 @@ void shadow_prepare_page_type_change(str
     shadow_remove_all_shadows(d, page_to_mfn(page));
 }
 
-static void
-sh_remove_all_shadows_and_parents(struct domain *d, mfn_t gmfn)
-/* Even harsher: this is a HVM page that we thing is no longer a pagetable.
- * Unshadow it, and recursively unshadow pages that reference it. */
-{
-    sh_remove_shadows(d, gmfn, 0, 1);
-    /* XXX TODO:
-     * Rework this hashtable walker to return a linked-list of all
-     * the shadows it modified, then do breadth-first recursion
-     * to find the way up to higher-level tables and unshadow them too.
-     *
-     * The current code (just tearing down each page's shadows as we
-     * detect that it is not a pagetable) is correct, but very slow.
-     * It means extra emulated writes and slows down removal of mappings. */
-}
-
 /**************************************************************************/
 
 /* Reset the up-pointers of every L3 shadow to 0.
@@ -3086,126 +3070,6 @@ static int shadow_test_disable(struct do
 }
 
 /**************************************************************************/
-/* P2M map manipulations */
-
-/* shadow specific code which should be called when P2M table entry is updated
- * with new content. It is responsible for update the entry, as well as other
- * shadow processing jobs.
- */
-
-static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
-                                       l1_pgentry_t old, l1_pgentry_t new,
-                                       unsigned int level)
-{
-    mfn_t omfn = l1e_get_mfn(old);
-    unsigned int oflags = l1e_get_flags(old);
-    p2m_type_t p2mt = p2m_flags_to_type(oflags);
-    bool flush = false;
-
-    /*
-     * If there are any shadows, update them.  But if shadow_teardown()
-     * has already been called then it's not safe to try.
-     */
-    if ( unlikely(!d->arch.paging.shadow.total_pages) )
-        return;
-
-    switch ( level )
-    {
-    default:
-        /*
-         * The following assertion is to make sure we don't step on 1GB host
-         * page support of HVM guest.
-         */
-        ASSERT(!((oflags & _PAGE_PRESENT) && (oflags & _PAGE_PSE)));
-        break;
-
-    /* If we're removing an MFN from the p2m, remove it from the shadows too */
-    case 1:
-        if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(omfn) )
-        {
-            sh_remove_all_shadows_and_parents(d, omfn);
-            if ( sh_remove_all_mappings(d, omfn, _gfn(gfn)) )
-                flush = true;
-        }
-        break;
-
-    /*
-     * If we're removing a superpage mapping from the p2m, we need to check
-     * all the pages covered by it.  If they're still there in the new
-     * scheme, that's OK, but otherwise they must be unshadowed.
-     */
-    case 2:
-        if ( !(oflags & _PAGE_PRESENT) || !(oflags & _PAGE_PSE) )
-            break;
-
-        if ( p2m_is_valid(p2mt) && mfn_valid(omfn) )
-        {
-            unsigned int i;
-            mfn_t nmfn = l1e_get_mfn(new);
-            l1_pgentry_t *npte = NULL;
-
-            /* If we're replacing a superpage with a normal L1 page, map it */
-            if ( (l1e_get_flags(new) & _PAGE_PRESENT) &&
-                 !(l1e_get_flags(new) & _PAGE_PSE) &&
-                 mfn_valid(nmfn) )
-                npte = map_domain_page(nmfn);
-
-            gfn &= ~(L1_PAGETABLE_ENTRIES - 1);
-
-            for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
-            {
-                if ( !npte ||
-                     !p2m_is_ram(p2m_flags_to_type(l1e_get_flags(npte[i]))) ||
-                     !mfn_eq(l1e_get_mfn(npte[i]), omfn) )
-                {
-                    /* This GFN->MFN mapping has gone away */
-                    sh_remove_all_shadows_and_parents(d, omfn);
-                    if ( sh_remove_all_mappings(d, omfn, _gfn(gfn + i)) )
-                        flush = true;
-                }
-                omfn = mfn_add(omfn, 1);
-            }
-
-            if ( npte )
-                unmap_domain_page(npte);
-        }
-
-        break;
-    }
-
-    if ( flush )
-        guest_flush_tlb_mask(d, d->dirty_cpumask);
-}
-
-#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
-static void
-sh_write_p2m_entry_post(struct p2m_domain *p2m, unsigned int oflags)
-{
-    struct domain *d = p2m->domain;
-
-    /* If we're doing FAST_FAULT_PATH, then shadow mode may have
-       cached the fact that this is an mmio region in the shadow
-       page tables.  Blow the tables away to remove the cache.
-       This is pretty heavy handed, but this is a rare operation
-       (it might happen a dozen times during boot and then never
-       again), so it doesn't matter too much. */
-    if ( d->arch.paging.shadow.has_fast_mmio_entries )
-    {
-        shadow_blow_tables(d);
-        d->arch.paging.shadow.has_fast_mmio_entries = false;
-    }
-}
-#else
-# define sh_write_p2m_entry_post NULL
-#endif
-
-void shadow_p2m_init(struct p2m_domain *p2m)
-{
-    p2m->write_p2m_entry_pre  = sh_unshadow_for_p2m_change;
-    p2m->write_p2m_entry_post = sh_write_p2m_entry_post;
-}
-
-/**************************************************************************/
 /* Log-dirty mode support */
 
 /* Shadow specific code which is called in paging_log_dirty_enable().
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -774,6 +774,142 @@ void sh_destroy_monitor_table(const stru
 }
 
 /**************************************************************************/
+/* P2M map manipulations */
+
+/* Shadow specific code which should be called when a P2M table entry is
+ * updated with new content.  It is responsible for updating the entry, as
+ * well as for other shadow processing jobs.
+ */
+
+static void
+sh_remove_all_shadows_and_parents(struct domain *d, mfn_t gmfn)
+/* Even harsher: this is an HVM page that we think is no longer a pagetable.
+ * Unshadow it, and recursively unshadow pages that reference it. */
+{
+    sh_remove_shadows(d, gmfn, 0, 1);
+    /* XXX TODO:
+     * Rework this hashtable walker to return a linked-list of all
+     * the shadows it modified, then do breadth-first recursion
+     * to find the way up to higher-level tables and unshadow them too.
+     *
+     * The current code (just tearing down each page's shadows as we
+     * detect that it is not a pagetable) is correct, but very slow.
+     * It means extra emulated writes and slows down removal of mappings. */
+}
+
+static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
+                                       l1_pgentry_t old, l1_pgentry_t new,
+                                       unsigned int level)
+{
+    mfn_t omfn = l1e_get_mfn(old);
+    unsigned int oflags = l1e_get_flags(old);
+    p2m_type_t p2mt = p2m_flags_to_type(oflags);
+    bool flush = false;
+
+    /*
+     * If there are any shadows, update them.  But if shadow_teardown()
+     * has already been called then it's not safe to try.
+     */
+    if ( unlikely(!d->arch.paging.shadow.total_pages) )
+        return;
+
+    switch ( level )
+    {
+    default:
+        /*
+         * The following assertion is to make sure we don't step on 1GB host
+         * page support of HVM guest.
+         */
+        ASSERT(!((oflags & _PAGE_PRESENT) && (oflags & _PAGE_PSE)));
+        break;
+
+    /* If we're removing an MFN from the p2m, remove it from the shadows too */
+    case 1:
+        if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(omfn) )
+        {
+            sh_remove_all_shadows_and_parents(d, omfn);
+            if ( sh_remove_all_mappings(d, omfn, _gfn(gfn)) )
+                flush = true;
+        }
+        break;
+
+    /*
+     * If we're removing a superpage mapping from the p2m, we need to check
+     * all the pages covered by it.  If they're still there in the new
+     * scheme, that's OK, but otherwise they must be unshadowed.
+     */
+    case 2:
+        if ( !(oflags & _PAGE_PRESENT) || !(oflags & _PAGE_PSE) )
+            break;
+
+        if ( p2m_is_valid(p2mt) && mfn_valid(omfn) )
+        {
+            unsigned int i;
+            mfn_t nmfn = l1e_get_mfn(new);
+            l1_pgentry_t *npte = NULL;
+
+            /* If we're replacing a superpage with a normal L1 page, map it */
+            if ( (l1e_get_flags(new) & _PAGE_PRESENT) &&
+                 !(l1e_get_flags(new) & _PAGE_PSE) &&
+                 mfn_valid(nmfn) )
+                npte = map_domain_page(nmfn);
+
+            gfn &= ~(L1_PAGETABLE_ENTRIES - 1);
+
+            for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+            {
+                if ( !npte ||
+                     !p2m_is_ram(p2m_flags_to_type(l1e_get_flags(npte[i]))) ||
+                     !mfn_eq(l1e_get_mfn(npte[i]), omfn) )
+                {
+                    /* This GFN->MFN mapping has gone away */
+                    sh_remove_all_shadows_and_parents(d, omfn);
+                    if ( sh_remove_all_mappings(d, omfn, _gfn(gfn + i)) )
+                        flush = true;
+                }
+                omfn = mfn_add(omfn, 1);
+            }
+
+            if ( npte )
+                unmap_domain_page(npte);
+        }
+
+        break;
+    }
+
+    if ( flush )
+        guest_flush_tlb_mask(d, d->dirty_cpumask);
+}
+
+#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
+static void
+sh_write_p2m_entry_post(struct p2m_domain *p2m, unsigned int oflags)
+{
+    struct domain *d = p2m->domain;
+
+    /* If we're doing FAST_FAULT_PATH, then shadow mode may have
+       cached the fact that this is an mmio region in the shadow
+       page tables.  Blow the tables away to remove the cache.
+       This is pretty heavy handed, but this is a rare operation
+       (it might happen a dozen times during boot and then never
+       again), so it doesn't matter too much. */
+    if ( d->arch.paging.shadow.has_fast_mmio_entries )
+    {
+        shadow_blow_tables(d);
+        d->arch.paging.shadow.has_fast_mmio_entries = false;
+    }
+}
+#else
+# define sh_write_p2m_entry_post NULL
+#endif
+
+void shadow_p2m_init(struct p2m_domain *p2m)
+{
+    p2m->write_p2m_entry_pre  = sh_unshadow_for_p2m_change;
+    p2m->write_p2m_entry_post = sh_write_p2m_entry_post;
+}
+
+/**************************************************************************/
 /* VRAM dirty tracking support */
 int shadow_track_dirty_vram(struct domain *d,
                             unsigned long begin_pfn,
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -278,7 +278,6 @@ struct p2m_domain {
                                                   unsigned long first_gfn,
                                                   unsigned long last_gfn);
     void               (*memory_type_changed)(struct p2m_domain *p2m);
-#endif
     void               (*write_p2m_entry_pre)(struct domain *d,
                                               unsigned long gfn,
                                               l1_pgentry_t old,
@@ -286,6 +285,7 @@ struct p2m_domain {
                                               unsigned int level);
     void               (*write_p2m_entry_post)(struct p2m_domain *p2m,
                                                unsigned int oflags);
+#endif
 #if P2M_AUDIT
     long               (*audit_p2m)(struct p2m_domain *p2m);
 #endif
@@ -788,8 +788,6 @@ int __must_check p2m_set_entry(struct p2
 #if defined(CONFIG_HVM)
 /* Set up function pointers for PT implementation: only for use by p2m code */
 extern void p2m_pt_init(struct p2m_domain *p2m);
-#elif defined(CONFIG_SHADOW_PAGING)
-# define p2m_pt_init shadow_p2m_init
 #else
 static inline void p2m_pt_init(struct p2m_domain *p2m) {}
 #endif


