From: Jan Beulich <jbeulich@suse.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "Andrew Cooper" <andrew.cooper3@citrix.com>,
	"George Dunlap" <george.dunlap@citrix.com>,
	"Ian Jackson" <iwj@xenproject.org>,
	"Julien Grall" <julien@xen.org>,
	"Stefano Stabellini" <sstabellini@kernel.org>,
	"Wei Liu" <wl@xen.org>, "Roger Pau Monné" <roger.pau@citrix.com>,
	"Daniel de Graaf" <dgdegra@tycho.nsa.gov>,
	"Paul Durrant" <paul@xen.org>,
	"Tamas K Lengyel" <tamas@tklengyel.com>,
	"Petre Pircalabu" <ppircalabu@bitdefender.com>,
	"Alexandru Isaila" <aisaila@bitdefender.com>
Subject: [PATCH v2 09/12] x86: make mem-paging configurable and default it to off for being unsupported
Date: Mon, 12 Apr 2021 16:12:41 +0200
Message-ID: <26dae9ef-5fcb-f806-059d-7cdd2974ad40@suse.com>
In-Reply-To: <3cf73378-b9d6-0eca-12b6-0f628518bebf@suse.com>

While doing so, make the option dependent upon HVM, which really is the
main purpose of the change.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: New.
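
Editor's aside on the mem_paging_enabled() helper introduced in
xen/include/asm-x86/mem_paging.h below: the following is a minimal,
standalone sketch of the general pattern (a predicate that folds to a
compile-time "false" when the Kconfig option is off), using illustrative
stand-in names rather than the real Xen types. The point is that callers
such as arch_iommu_use_permitted() then need no #ifdef of their own:

  #include <stdbool.h>
  #include <stdio.h>

  struct domain_stub {
      void *vm_event_paging;            /* stand-in for d->vm_event_paging */
  };

  #ifdef CONFIG_MEM_PAGING
  # define mem_paging_enabled(d) ((d)->vm_event_paging != NULL)
  #else
  # define mem_paging_enabled(d) false  /* branch folds away entirely */
  #endif

  int main(void)
  {
      struct domain_stub d = { .vm_event_paging = NULL };

      /* Without CONFIG_MEM_PAGING the condition is constant-false, so the
       * compiler can drop the first branch while the caller stays free of
       * conditional compilation. */
      if ( mem_paging_enabled(&d) )
          puts("paging active");
      else
          puts("paging unavailable");

      return 0;
  }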

--- a/xen/arch/x86/Kconfig
+++ b/xen/arch/x86/Kconfig
@@ -15,7 +15,6 @@ config X86
 	select HAS_FAST_MULTIPLY
 	select HAS_IOPORTS
 	select HAS_KEXEC
-	select HAS_MEM_PAGING
 	select HAS_NS16550
 	select HAS_PASSTHROUGH
 	select HAS_PCI
@@ -251,6 +250,10 @@ config HYPERV_GUEST
 
 endif
 
+config MEM_PAGING
+	bool "Xen memory paging support (UNSUPPORTED)" if UNSUPPORTED
+	depends on HVM
+
 config MEM_SHARING
 	bool "Xen memory sharing support (UNSUPPORTED)" if UNSUPPORTED
 	depends on HVM
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1932,9 +1932,11 @@ int hvm_hap_nested_page_fault(paddr_t gp
         goto out_put_gfn;
     }
 
+#ifdef CONFIG_MEM_PAGING
     /* Check if the page has been paged out */
     if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
         paged = 1;
+#endif
 
 #ifdef CONFIG_MEM_SHARING
     /* Mem sharing: if still shared on write access then its enomem */
--- a/xen/arch/x86/mm/Makefile
+++ b/xen/arch/x86/mm/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_HVM) += altp2m.o
 obj-$(CONFIG_HVM) += guest_walk_2.o guest_walk_3.o guest_walk_4.o
 obj-$(CONFIG_SHADOW_PAGING) += guest_walk_4.o
 obj-$(CONFIG_MEM_ACCESS) += mem_access.o
-obj-y += mem_paging.o
+obj-$(CONFIG_MEM_PAGING) += mem_paging.o
 obj-$(CONFIG_MEM_SHARING) += mem_sharing.o
 obj-y += p2m.o p2m-pt.o
 obj-$(CONFIG_HVM) += p2m-ept.o p2m-pod.o
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -155,8 +155,10 @@ int compat_arch_memory_op(unsigned long
     case XENMEM_get_sharing_shared_pages:
         return mem_sharing_get_nr_shared_mfns();
 
+#ifdef CONFIG_MEM_PAGING
     case XENMEM_paging_op:
         return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
+#endif
 
 #ifdef CONFIG_MEM_SHARING
     case XENMEM_sharing_op:
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -994,8 +994,10 @@ long subarch_memory_op(unsigned long cmd
     case XENMEM_get_sharing_shared_pages:
         return mem_sharing_get_nr_shared_mfns();
 
+#ifdef CONFIG_MEM_PAGING
     case XENMEM_paging_op:
         return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
+#endif
 
 #ifdef CONFIG_MEM_SHARING
     case XENMEM_sharing_op:
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -40,9 +40,6 @@ config HAS_IOPORTS
 config HAS_KEXEC
 	bool
 
-config HAS_MEM_PAGING
-	bool
-
 config HAS_PDX
 	bool
 
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1096,7 +1096,7 @@ static void complete_domain_destroy(stru
     free_xenoprof_pages(d);
 #endif
 
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
     xfree(d->vm_event_paging);
 #endif
     xfree(d->vm_event_monitor);
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1856,7 +1856,7 @@ int check_get_page_from_gfn(struct domai
 
     page = get_page_from_gfn(d, gfn_x(gfn), &p2mt, q);
 
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
     if ( p2m_is_paging(p2mt) )
     {
         if ( page )
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -390,7 +390,7 @@ static int vm_event_resume(struct domain
         /* Check flags which apply only when the vCPU is paused */
         if ( atomic_read(&v->vm_event_pause_count) )
         {
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
             if ( rsp.reason == VM_EVENT_REASON_MEM_PAGING )
                 p2m_mem_paging_resume(d, &rsp);
 #endif
@@ -521,7 +521,7 @@ int __vm_event_claim_slot(struct domain
         return vm_event_grab_slot(ved, current->domain != d);
 }
 
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void mem_paging_notification(struct vcpu *v, unsigned int port)
 {
@@ -546,7 +546,7 @@ static void mem_sharing_notification(str
 /* Clean up on domain destruction */
 void vm_event_cleanup(struct domain *d)
 {
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
     if ( vm_event_check_ring(d->vm_event_paging) )
     {
         /* Destroying the wait queue head means waking up all
@@ -613,7 +613,7 @@ int vm_event_domctl(struct domain *d, st
 
     switch ( vec->mode )
     {
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
     case XEN_DOMCTL_VM_EVENT_OP_PAGING:
     {
         rc = -EINVAL;
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -23,6 +23,7 @@
 
 #include <asm/hvm/io.h>
 #include <asm/io_apic.h>
+#include <asm/mem_paging.h>
 #include <asm/setup.h>
 
 const struct iommu_init_ops *__initdata iommu_init_ops;
@@ -336,7 +337,7 @@ bool arch_iommu_use_permitted(const stru
      */
     return d == dom_io ||
            (likely(!mem_sharing_enabled(d)) &&
-            likely(!vm_event_check_ring(d->vm_event_paging)) &&
+            likely(!mem_paging_enabled(d)) &&
             likely(!p2m_get_hostp2m(d)->global_logdirty));
 }
 
--- a/xen/include/asm-x86/mem_paging.h
+++ b/xen/include/asm-x86/mem_paging.h
@@ -24,6 +24,12 @@
 
 int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg);
 
+#ifdef CONFIG_MEM_PAGING
+# define mem_paging_enabled(d) vm_event_check_ring((d)->vm_event_paging)
+#else
+# define mem_paging_enabled(d) false
+#endif
+
 #endif /*__ASM_X86_MEM_PAGING_H__ */
 
 /*
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -136,11 +136,16 @@ typedef unsigned int p2m_query_t;
 #define P2M_PAGEABLE_TYPES (p2m_to_mask(p2m_ram_rw) \
                             | p2m_to_mask(p2m_ram_logdirty) )
 
+#ifdef CONFIG_MEM_PAGING
 #define P2M_PAGING_TYPES (p2m_to_mask(p2m_ram_paging_out)        \
                           | p2m_to_mask(p2m_ram_paged)           \
                           | p2m_to_mask(p2m_ram_paging_in))
 
 #define P2M_PAGED_TYPES (p2m_to_mask(p2m_ram_paged))
+#else
+#define P2M_PAGING_TYPES 0
+#define P2M_PAGED_TYPES 0
+#endif
 
 /* Shared types */
 /* XXX: Sharable types could include p2m_ram_ro too, but we would need to
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -530,7 +530,7 @@ struct domain
     struct domain *parent; /* VM fork parent */
 #endif
     /* Memory paging support */
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
     struct vm_event_domain *vm_event_paging;
 #endif
     /* VM event monitor support */
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -592,7 +592,7 @@ static XSM_INLINE int xsm_mem_access(XSM
 }
 #endif
 
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
 static XSM_INLINE int xsm_mem_paging(XSM_DEFAULT_ARG struct domain *d)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -146,7 +146,7 @@ struct xsm_operations {
     int (*mem_access) (struct domain *d);
 #endif
 
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
     int (*mem_paging) (struct domain *d);
 #endif
 
@@ -592,7 +592,7 @@ static inline int xsm_mem_access (xsm_de
 }
 #endif
 
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
 static inline int xsm_mem_paging (xsm_default_t def, struct domain *d)
 {
     return xsm_ops->mem_paging(d);
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -124,7 +124,7 @@ void __init xsm_fixup_ops (struct xsm_op
     set_to_dummy_if_null(ops, mem_access);
 #endif
 
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
     set_to_dummy_if_null(ops, mem_paging);
 #endif
 
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1256,7 +1256,7 @@ static int flask_mem_access(struct domai
 }
 #endif
 
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
 static int flask_mem_paging(struct domain *d)
 {
     return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_PAGING);
@@ -1829,7 +1829,7 @@ static struct xsm_operations flask_ops =
     .mem_access = flask_mem_access,
 #endif
 
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
     .mem_paging = flask_mem_paging,
 #endif
 
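
For reference, a standalone sketch of the "empty type mask" approach used
for P2M_PAGING_TYPES / P2M_PAGED_TYPES in the asm-x86/p2m.h hunk above:
with the mask defined to 0 when CONFIG_MEM_PAGING is off, any type check
built from it collapses to compile-time false. Names below are simplified
stand-ins, not the real Xen definitions:

  #include <stdio.h>

  typedef enum {
      ram_rw,
      ram_paging_out,
      ram_paged,
      ram_paging_in,
  } p2m_type_stub;

  #define to_mask(t) (1UL << (t))

  #ifdef CONFIG_MEM_PAGING
  # define PAGING_TYPES (to_mask(ram_paging_out) | \
                         to_mask(ram_paged)      | \
                         to_mask(ram_paging_in))
  #else
  # define PAGING_TYPES 0UL   /* no type qualifies: checks fold to false */
  #endif

  #define is_paging(t) (to_mask(t) & PAGING_TYPES)

  int main(void)
  {
      p2m_type_stub t = ram_paged;

      /* With CONFIG_MEM_PAGING undefined this always prints "not paging",
       * and the compiler can discard the paging branch entirely. */
      printf("%s\n", is_paging(t) ? "paging" : "not paging");
      return 0;
  }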


