From: Jan Beulich <jbeulich@suse.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Wei Liu" <wl@xen.org>, "Roger Pau Monné" <roger.pau@citrix.com>,
	"George Dunlap" <george.dunlap@citrix.com>,
	"Paul Durrant" <paul@xen.org>
Subject: [PATCH 16/16] x86/P2M: the majority of struct p2m_domain's fields are HVM-only
Date: Mon, 5 Jul 2021 18:15:11 +0200
Message-ID: <e5362b33-0f5b-ba2b-d033-ca0a09e5fd54@suse.com>
In-Reply-To: <d1fd572d-5bfe-21d8-3b50-d9b0646ce2f0@suse.com>

..., as are the majority of the locks involved. Conditionalize things
accordingly.

Also adjust the ioreq field's indentation while at it.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
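
A note on the idiom, for readers following along (illustrative, not part
of the change itself): once a field is compiled out under CONFIG_HVM, no
always-built code may reach for it directly, so accesses get wrapped in
small predicates that degrade to a constant on !HVM builds; see the new
p2m_is_global_logdirty() below. A minimal standalone sketch of the
pattern, using hypothetical names rather than the actual Xen code:

    #include <stdbool.h>

    struct guest_state {
    #ifdef CONFIG_HVM
        bool dirty_tracking;        /* HVM-only field, absent otherwise */
    #endif
        unsigned long nr_pfns;      /* common field, always present */
    };

    /* Always-available predicate: call sites need no #ifdef of their own. */
    static inline bool guest_dirty_tracking(const struct guest_state *g)
    {
    #ifdef CONFIG_HVM
        return g->dirty_tracking;
    #else
        return false;               /* constant, so dead branches fold away */
    #endif
    }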

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -481,8 +481,11 @@ unsigned int page_get_ram_type(mfn_t mfn
 
 unsigned long domain_get_maximum_gpfn(struct domain *d)
 {
+#ifdef CONFIG_HVM
     if ( is_hvm_domain(d) )
         return p2m_get_hostp2m(d)->max_mapped_pfn;
+#endif
+
     /* NB. PV guests specify nr_pfns rather than max_pfn so we adjust here. */
     return (arch_get_max_pfn(d) ?: 1) - 1;
 }
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -237,6 +237,8 @@ static inline void mm_enforce_order_unlo
  *                                                                      *
  ************************************************************************/
 
+#ifdef CONFIG_HVM
+
 /* Nested P2M lock (per-domain)
  *
  * A per-domain lock that protects the mapping from nested-CR3 to
@@ -354,6 +356,8 @@ declare_mm_lock(pod)
 #define pod_unlock(p)         mm_unlock(&(p)->pod.lock)
 #define pod_locked_by_me(p)   mm_locked_by_me(&(p)->pod.lock)
 
+#endif /* CONFIG_HVM */
+
 /* Page alloc lock (per-domain)
  *
  * This is an external lock, not represented by an mm_lock_t. However,
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -48,6 +48,8 @@
 #undef virt_to_mfn
 #define virt_to_mfn(v) _mfn(__virt_to_mfn(v))
 
+DEFINE_PERCPU_RWLOCK_GLOBAL(p2m_percpu_rwlock);
+
 /* Turn on/off host superpage page table support for hap, default on. */
 bool_t __initdata opt_hap_1gb = 1, __initdata opt_hap_2mb = 1;
 boolean_param("hap_1gb", opt_hap_1gb);
--- a/xen/arch/x86/mm/p2m-basic.c
+++ b/xen/arch/x86/mm/p2m-basic.c
@@ -28,16 +28,15 @@
 #include "mm-locks.h"
 #include "p2m.h"
 
-DEFINE_PERCPU_RWLOCK_GLOBAL(p2m_percpu_rwlock);
-
 /* Init the datastructures for later use by the p2m code */
 static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
 {
     int ret = 0;
 
-    mm_rwlock_init(&p2m->lock);
 #ifdef CONFIG_HVM
+    mm_rwlock_init(&p2m->lock);
     INIT_PAGE_LIST_HEAD(&p2m->pages);
+    spin_lock_init(&p2m->ioreq.lock);
 #endif
 
     p2m->domain = d;
@@ -55,8 +54,6 @@ static int p2m_initialise(struct domain
     else
         p2m_pt_init(p2m);
 
-    spin_lock_init(&p2m->ioreq.lock);
-
     return ret;
 }
 
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -338,7 +338,7 @@ bool arch_iommu_use_permitted(const stru
     return d == dom_io ||
            (likely(!mem_sharing_enabled(d)) &&
             likely(!mem_paging_enabled(d)) &&
-            likely(!p2m_get_hostp2m(d)->global_logdirty));
+            likely(!p2m_is_global_logdirty(d)));
 }
 
 /*
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -199,8 +199,10 @@ typedef enum {
 
 /* Per-p2m-table state */
 struct p2m_domain {
+#ifdef CONFIG_HVM
     /* Lock that protects updates to the p2m */
     mm_rwlock_t           lock;
+#endif
 
     /*
      * Same as a domain's dirty_cpumask but limited to
@@ -220,13 +222,14 @@ struct p2m_domain {
      */
     p2m_access_t default_access;
 
+#ifdef CONFIG_HVM
+
     /* Host p2m: Log-dirty ranges registered for the domain. */
     struct rangeset   *logdirty_ranges;
 
     /* Host p2m: Global log-dirty mode enabled for the domain. */
     bool               global_logdirty;
 
-#ifdef CONFIG_HVM
     /* Translated domain: p2m mapping */
     pagetable_t        phys_table;
 
@@ -269,7 +272,6 @@ struct p2m_domain {
                                               unsigned int level);
     void               (*write_p2m_entry_post)(struct p2m_domain *p2m,
                                                unsigned int oflags);
-#endif
 #if P2M_AUDIT
     long               (*audit_p2m)(struct p2m_domain *p2m);
 #endif
@@ -304,7 +306,6 @@ struct p2m_domain {
     unsigned long min_remapped_gfn;
     unsigned long max_remapped_gfn;
 
-#ifdef CONFIG_HVM
     /* Populate-on-demand variables
      * All variables are protected with the pod lock. We cannot rely on
      * the p2m lock if it's turned into a fine-grained lock.
@@ -361,27 +362,27 @@ struct p2m_domain {
      * threaded on in LRU order.
      */
     struct list_head   np2m_list;
-#endif
 
     union {
         struct ept_data ept;
         /* NPT-equivalent structure could be added here. */
     };
 
-     struct {
-         spinlock_t lock;
-         /*
-          * ioreq server who's responsible for the emulation of
-          * gfns with specific p2m type(for now, p2m_ioreq_server).
-          */
-         struct ioreq_server *server;
-         /*
-          * flags specifies whether read, write or both operations
-          * are to be emulated by an ioreq server.
-          */
-         unsigned int flags;
-         unsigned long entry_count;
-     } ioreq;
+    struct {
+        spinlock_t lock;
+        /*
+         * ioreq server who's responsible for the emulation of
+         * gfns with specific p2m type(for now, p2m_ioreq_server).
+         */
+        struct ioreq_server *server;
+        /*
+         * flags specifies whether read, write or both operations
+         * are to be emulated by an ioreq server.
+         */
+        unsigned int flags;
+        unsigned long entry_count;
+    } ioreq;
+#endif /* CONFIG_HVM */
 };
 
 /* get host p2m table */
@@ -645,6 +646,15 @@ int p2m_finish_type_change(struct domain
                            gfn_t first_gfn,
                            unsigned long max_nr);
 
+static inline bool p2m_is_global_logdirty(const struct domain *d)
+{
+#ifdef CONFIG_HVM
+    return p2m_get_hostp2m(d)->global_logdirty;
+#else
+    return false;
+#endif
+}
+
 int p2m_is_logdirty_range(struct p2m_domain *, unsigned long start,
                           unsigned long end);
 
@@ -792,6 +802,8 @@ extern void audit_p2m(struct domain *d,
 #define P2M_DEBUG(f, a...) do { (void)(f); } while(0)
 #endif
 
+#ifdef CONFIG_HVM
+
 /*
  * Functions specific to the p2m-pt implementation
  */
@@ -852,7 +864,7 @@ void nestedp2m_write_p2m_entry_post(stru
 /*
  * Alternate p2m: shadow p2m tables used for alternate memory views
  */
-#ifdef CONFIG_HVM
+
 /* get current alternate p2m table */
 static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
 {
@@ -905,10 +917,10 @@ int p2m_altp2m_propagate_change(struct d
 /* Set a specific p2m view visibility */
 int p2m_set_altp2m_view_visibility(struct domain *d, unsigned int idx,
                                    uint8_t visible);
-#else
+#else /* CONFIG_HVM */
 struct p2m_domain *p2m_get_altp2m(struct vcpu *v);
 static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx) {}
-#endif
+#endif /* CONFIG_HVM */
 
 /*
  * p2m type to IOMMU flags
@@ -942,6 +954,8 @@ static inline unsigned int p2m_get_iommu
     return flags;
 }
 
+#ifdef CONFIG_HVM
+
 int p2m_set_ioreq_server(struct domain *d, unsigned int flags,
                          struct ioreq_server *s);
 struct ioreq_server *p2m_get_ioreq_server(struct domain *d,
@@ -1006,6 +1020,8 @@ static inline int p2m_entry_modify(struc
     return 0;
 }
 
+#endif /* CONFIG_HVM */
+
 #endif /* _XEN_ASM_X86_P2M_H */
 
 /*
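
An editorial aside on the p2m_is_global_logdirty() helper introduced
above (a sketch, not part of the patch): since its !HVM flavor is a
constant-false static inline, call sites such as
arch_iommu_use_permitted() build unchanged in both configurations and
the compiler simply drops the dead test. A hypothetical call site to
illustrate:

    /* Hypothetical caller: no CONFIG_HVM guard needed at the call site. */
    static bool logdirty_forbids_iommu(const struct domain *d)
    {
        return p2m_is_global_logdirty(d); /* folds to false on !HVM */
    }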




Thread overview: 50+ messages
2021-07-05 16:03 [PATCH 00/16] x86/mm: large parts of P2M code and struct p2m_domain are HVM-only Jan Beulich
2021-07-05 16:05 ` Jan Beulich
2021-07-05 16:05 ` [PATCH 01/16] x86/P2M: rename p2m_remove_page() Jan Beulich
2022-02-04 21:54   ` George Dunlap
2022-02-07  9:20     ` Jan Beulich
2021-07-05 16:06 ` [PATCH 02/16] x86/P2M: introduce p2m_{add,remove}_page() Jan Beulich
2021-07-05 17:47   ` Paul Durrant
2021-07-06  7:05     ` Jan Beulich
2022-02-04 22:07   ` George Dunlap
2022-02-07  9:38     ` Jan Beulich
2022-02-07 15:49       ` George Dunlap
2021-07-05 16:06 ` [PATCH 03/16] x86/P2M: drop a few CONFIG_HVM Jan Beulich
2022-02-04 22:13   ` George Dunlap
2022-02-07  9:51     ` Jan Beulich
2021-07-05 16:07 ` [PATCH 04/16] x86/P2M: move map_domain_gfn() (again) Jan Beulich
2022-02-04 22:17   ` George Dunlap
2021-07-05 16:07 ` [PATCH 05/16] x86/mm: move guest_physmap_{add,remove}_page() Jan Beulich
2022-02-05 21:06   ` George Dunlap
2021-07-05 16:07 ` [PATCH 06/16] x86/mm: split set_identity_p2m_entry() into PV and HVM parts Jan Beulich
2022-02-05 21:09   ` George Dunlap
2021-07-05 16:09 ` [PATCH 07/16] x86/P2M: p2m_{alloc,free}_ptp() and p2m_alloc_table() are HVM-only Jan Beulich
2021-07-07  1:35   ` Tian, Kevin
2022-02-05 21:17   ` George Dunlap
2021-07-05 16:09 ` [PATCH 08/16] x86/P2M: PoD, altp2m, and nested-p2m " Jan Beulich
2022-02-05 21:29   ` George Dunlap
2022-02-07 10:11     ` Jan Beulich
2022-02-07 14:45       ` George Dunlap
2022-02-07 15:23         ` Jan Beulich
2021-07-05 16:10 ` [PATCH 09/16] x86/P2M: split out init/teardown functions Jan Beulich
2022-02-05 21:31   ` George Dunlap
2021-07-05 16:10 ` [PATCH 10/16] x86/P2M: p2m_get_page_from_gfn() is HVM-only Jan Beulich
2022-02-14 14:26   ` George Dunlap
2021-07-05 16:12 ` [PATCH 11/16] x86/P2M: derive a HVM-only variant from __get_gfn_type_access() Jan Beulich
2022-02-14 15:12   ` George Dunlap
2022-02-14 15:20     ` Jan Beulich
2021-07-05 16:12 ` [PATCH 12/16] x86/p2m: re-arrange {,__}put_gfn() Jan Beulich
2022-02-14 15:17   ` George Dunlap
2021-07-05 16:13 ` [PATCH 13/16] shr_pages field is MEM_SHARING-only Jan Beulich
2021-07-06 12:42   ` Tamas K Lengyel
2022-02-14 15:36   ` George Dunlap
2021-07-05 16:14 ` [PATCH 14/16] paged_pages field is MEM_PAGING-only Jan Beulich
2021-07-06 12:44   ` Tamas K Lengyel
2022-02-14 15:38   ` George Dunlap
2021-07-05 16:14 ` [PATCH 15/16] x86/P2M: p2m.c is HVM-only Jan Beulich
2022-02-14 15:39   ` George Dunlap
2021-07-05 16:15 ` Jan Beulich [this message]
2021-07-05 17:49   ` [PATCH 16/16] x86/P2M: the majority of struct p2m_domain's fields are HVM-only Paul Durrant
2022-02-14 15:51   ` George Dunlap
2022-02-14 16:07     ` Jan Beulich
2022-02-16  7:54       ` Jan Beulich
