From: Jan Beulich <jbeulich@suse.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "Andrew Cooper" <andrew.cooper3@citrix.com>,
	"George Dunlap" <george.dunlap@citrix.com>,
	"Julien Grall" <julien@xen.org>,
	"Stefano Stabellini" <sstabellini@kernel.org>,
	"Wei Liu" <wl@xen.org>, "Roger Pau Monné" <roger.pau@citrix.com>
Subject: [PATCH RFC 03/10] domain: GADDR based shared guest area registration alternative - teardown
Date: Wed, 19 Oct 2022 09:40:29 +0200
Message-ID: <214c9ec9-b948-1ca6-24d6-4e7f8852ac45@suse.com>
In-Reply-To: <bcab8340-6bfd-8dfc-efe1-564e520b3a06@suse.com>

In preparation for the introduction of new vCPU operations allowing the
respective areas (one of the two is x86-specific) to be registered by
guest-physical address, add the necessary domain cleanup hooks.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
RFC: Zapping the areas in pv_shim_shutdown() may not be strictly
     necessary: As I understand it, unmap_vcpu_info() is called only
     because the vCPU info area cannot be re-registered. Beyond that the
     assumption presumably is that the areas would only ever be
     re-registered as they were before. If that's not the case, I wonder
     whether the guest handles for both areas shouldn't also be zapped
     (see the sketch below for what that might look like).
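
     Purely for illustration (not part of this patch), zapping those
     handles on top of the unmapping added below in pv_shim_shutdown()
     would roughly look like:

         for_each_vcpu ( d, v )
         {
             unmap_vcpu_info(v);
             unmap_guest_area(v, &v->runstate_guest_area);
             unmap_guest_area(v, &v->arch.time_guest_area);

             /* Additional zapping of the guest handles (not done here). */
             set_xen_guest_handle(runstate_guest(v), NULL);
             set_xen_guest_handle(v->arch.time_info_guest, NULL);
         }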

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1035,7 +1035,10 @@ int arch_domain_soft_reset(struct domain
     }
 
     for_each_vcpu ( d, v )
+    {
         set_xen_guest_handle(v->arch.time_info_guest, NULL);
+        unmap_guest_area(v, &v->arch.time_guest_area);
+    }
 
  exit_put_gfn:
     put_gfn(d, gfn_x(gfn));
@@ -2350,6 +2353,8 @@ int domain_relinquish_resources(struct d
             if ( ret )
                 return ret;
 
+            unmap_guest_area(v, &v->arch.time_guest_area);
+
             vpmu_destroy(v);
         }
 
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -661,6 +661,7 @@ struct arch_vcpu
 
     /* A secondary copy of the vcpu time info. */
     XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
+    struct guest_area time_guest_area;
 
     struct arch_vm_event *vm_event;
 
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -394,8 +394,10 @@ int pv_shim_shutdown(uint8_t reason)
 
     for_each_vcpu ( d, v )
     {
-        /* Unmap guest vcpu_info pages. */
+        /* Unmap guest vcpu_info page and runstate/time areas. */
         unmap_vcpu_info(v);
+        unmap_guest_area(v, &v->runstate_guest_area);
+        unmap_guest_area(v, &v->arch.time_guest_area);
 
         /* Reset the periodic timer to the default value. */
         vcpu_set_periodic_timer(v, MILLISECS(10));
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -950,7 +950,10 @@ int domain_kill(struct domain *d)
         if ( cpupool_move_domain(d, cpupool0) )
             return -ERESTART;
         for_each_vcpu ( d, v )
+        {
             unmap_vcpu_info(v);
+            unmap_guest_area(v, &v->runstate_guest_area);
+        }
         d->is_dying = DOMDYING_dead;
         /* Mem event cleanup has to go here because the rings 
          * have to be put before we call put_domain. */
@@ -1404,6 +1407,7 @@ int domain_soft_reset(struct domain *d,
     {
         set_xen_guest_handle(runstate_guest(v), NULL);
         unmap_vcpu_info(v);
+        unmap_guest_area(v, &v->runstate_guest_area);
     }
 
     rc = arch_domain_soft_reset(d);
@@ -1555,6 +1559,15 @@ void unmap_vcpu_info(struct vcpu *v)
     put_page_and_type(mfn_to_page(mfn));
 }
 
+/*
+ * This is only intended to be used for domain cleanup (or more generally only
+ * with at least the respective vCPU, if it's not the current one, reliably
+ * paused).
+ */
+void unmap_guest_area(struct vcpu *v, struct guest_area *area)
+{
+}
+
 int default_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     struct vcpu_guest_context *ctxt;
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -5,6 +5,12 @@
 #include <xen/types.h>
 
 #include <public/xen.h>
+
+struct guest_area {
+    struct page_info *pg;
+    void *map;
+};
+
 #include <asm/domain.h>
 #include <asm/numa.h>
 
@@ -76,6 +82,11 @@ void arch_vcpu_destroy(struct vcpu *v);
 int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned int offset);
 void unmap_vcpu_info(struct vcpu *v);
 
+int map_guest_area(struct vcpu *v, paddr_t gaddr, unsigned int size,
+                   struct guest_area *area,
+                   void (*populate)(void *dst, struct vcpu *v));
+void unmap_guest_area(struct vcpu *v, struct guest_area *area);
+
 int arch_domain_create(struct domain *d,
                        struct xen_domctl_createdomain *config,
                        unsigned int flags);
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -202,6 +202,7 @@ struct vcpu
         XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t) compat;
     } runstate_guest; /* guest address */
 #endif
+    struct guest_area runstate_guest_area;
     unsigned int     new_state;
 
     /* Has the FPU been initialised? */
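
For context, the new struct guest_area and the map_guest_area() /
unmap_guest_area() pair declared above are meant to be consumed by the
registration operations introduced later in this series. A minimal, purely
hypothetical sketch of such a consumer (the handler name and the populate
callback are placeholder names, not taken from any later patch):

    /* Hypothetical GADDR based registration of the runstate area. */
    static int register_runstate_area_by_gaddr(struct vcpu *v, paddr_t gaddr)
    {
        /* populate_runstate() stands in for a callback filling the mapping. */
        return map_guest_area(v, gaddr, sizeof(struct vcpu_runstate_info),
                              &v->runstate_guest_area, populate_runstate);
    }

Teardown would then simply be the unmap_guest_area() calls this patch adds.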


