All of lore.kernel.org
 help / color / mirror / Atom feed
From: Henry Wang <xin.wang2@amd.com>
To: <xen-devel@lists.xenproject.org>
Cc: Henry Wang <xin.wang2@amd.com>, Wei Liu <wl@xen.org>,
	Anthony PERARD <anthony.perard@citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	George Dunlap <george.dunlap@citrix.com>,
	Jan Beulich <jbeulich@suse.com>, Julien Grall <julien@xen.org>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Alec Kwapis <alec.kwapis@medtronic.com>
Subject: [PATCH v2 5/5] xen/memory, tools: Make init-dom0less consume XEN_DOMCTL_get_mem_map
Date: Fri, 8 Mar 2024 09:54:35 +0800	[thread overview]
Message-ID: <20240308015435.4044339-6-xin.wang2@amd.com> (raw)
In-Reply-To: <20240308015435.4044339-1-xin.wang2@amd.com>

Previous commits enable the toolstack to get the domain memory map,
therefore instead of hardcoding the guest magic pages region, use
the XEN_DOMCTL_get_mem_map domctl to get the start address of the
guest magic pages region. Add the (XEN)MEMF_force_heap_alloc memory
flag to force populate_physmap() to allocate pages from the domheap
instead of using 1:1 or statically allocated pages to map the magic pages.

Reported-by: Alec Kwapis <alec.kwapis@medtronic.com>
Signed-off-by: Henry Wang <xin.wang2@amd.com>
---
v2:
- New patch
---
 tools/helpers/init-dom0less.c | 22 ++++++++++++++++++----
 xen/common/memory.c           | 10 ++++++++--
 xen/include/public/memory.h   |  5 +++++
 xen/include/xen/mm.h          |  2 ++
 4 files changed, 33 insertions(+), 6 deletions(-)

diff --git a/tools/helpers/init-dom0less.c b/tools/helpers/init-dom0less.c
index fee93459c4..92c612f6da 100644
--- a/tools/helpers/init-dom0less.c
+++ b/tools/helpers/init-dom0less.c
@@ -23,16 +23,30 @@ static int alloc_xs_page(struct xc_interface_core *xch,
                          libxl_dominfo *info,
                          uint64_t *xenstore_pfn)
 {
-    int rc;
-    const xen_pfn_t base = GUEST_MAGIC_BASE >> XC_PAGE_SHIFT;
-    xen_pfn_t p2m = (GUEST_MAGIC_BASE >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET;
+    int rc, i;
+    xen_pfn_t base = ((xen_pfn_t)-1);
+    xen_pfn_t p2m = ((xen_pfn_t)-1);
+    uint32_t nr_regions = XEN_MAX_MEM_REGIONS;
+    struct xen_mem_region mem_regions[XEN_MAX_MEM_REGIONS] = {0};
+
+    rc = xc_get_domain_mem_map(xch, info->domid, mem_regions, &nr_regions);
+
+    for ( i = 0; i < nr_regions; i++ )
+    {
+        if ( mem_regions[i].type == GUEST_MEM_REGION_MAGIC )
+        {
+            base = mem_regions[i].start >> XC_PAGE_SHIFT;
+            p2m = (mem_regions[i].start >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET;
+        }
+    }
 
     rc = xc_domain_setmaxmem(xch, info->domid,
                              info->max_memkb + (XC_PAGE_SIZE/1024));
     if (rc < 0)
         return rc;
 
-    rc = xc_domain_populate_physmap_exact(xch, info->domid, 1, 0, 0, &p2m);
+    rc = xc_domain_populate_physmap_exact(xch, info->domid, 1, 0,
+                                          XENMEMF_force_heap_alloc, &p2m);
     if (rc < 0)
         return rc;
 
diff --git a/xen/common/memory.c b/xen/common/memory.c
index b3b05c2ec0..18b6c16aed 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -219,7 +219,8 @@ static void populate_physmap(struct memop_args *a)
         }
         else
         {
-            if ( is_domain_direct_mapped(d) )
+            if ( is_domain_direct_mapped(d) &&
+                 !(a->memflags & MEMF_force_heap_alloc) )
             {
                 mfn = _mfn(gpfn);
 
@@ -246,7 +247,8 @@ static void populate_physmap(struct memop_args *a)
 
                 mfn = _mfn(gpfn);
             }
-            else if ( is_domain_using_staticmem(d) )
+            else if ( is_domain_using_staticmem(d) &&
+                      !(a->memflags & MEMF_force_heap_alloc) )
             {
                 /*
                  * No easy way to guarantee the retrieved pages are contiguous,
@@ -1433,6 +1435,10 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
              && (reservation.mem_flags & XENMEMF_populate_on_demand) )
             args.memflags |= MEMF_populate_on_demand;
 
+        if ( op == XENMEM_populate_physmap
+             && (reservation.mem_flags & XENMEMF_force_heap_alloc) )
+            args.memflags |= MEMF_force_heap_alloc;
+
         if ( xsm_memory_adjust_reservation(XSM_TARGET, curr_d, d) )
         {
             rcu_unlock_domain(d);
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 5e545ae9a4..2a1bfa5bfa 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -41,6 +41,11 @@
 #define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
 /* Flag to indicate the node specified is virtual node */
 #define XENMEMF_vnode  (1<<18)
+/*
+ * Flag to force populate_physmap() to allocate pages from the domheap,
+ * rather than using 1:1 or statically allocated pages.
+ */
+#define XENMEMF_force_heap_alloc  (1<<19)
 #endif
 
 struct xen_memory_reservation {
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index bb29b352ec..a4554f730d 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -205,6 +205,8 @@ struct npfec {
 #define  MEMF_no_icache_flush (1U<<_MEMF_no_icache_flush)
 #define _MEMF_no_scrub    8
 #define  MEMF_no_scrub    (1U<<_MEMF_no_scrub)
+#define _MEMF_force_heap_alloc 9
+#define  MEMF_force_heap_alloc (1U<<_MEMF_force_heap_alloc)
 #define _MEMF_node        16
 #define  MEMF_node_mask   ((1U << (8 * sizeof(nodeid_t))) - 1)
 #define  MEMF_node(n)     ((((n) + 1) & MEMF_node_mask) << _MEMF_node)
-- 
2.34.1



  parent reply	other threads:[~2024-03-08  1:55 UTC|newest]

Thread overview: 31+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-03-08  1:54 [PATCH v2 0/5] DOMCTL-based guest magic regions allocation for dom0less Henry Wang
2024-03-08  1:54 ` [PATCH v2 1/5] xen/arm: Rename assign_static_memory_11() for consistency Henry Wang
2024-03-08  8:18   ` Michal Orzel
2024-03-08  8:22     ` Henry Wang
2024-03-08  1:54 ` [PATCH v2 2/5] xen/domain.h: Centrialize is_domain_direct_mapped() Henry Wang
2024-03-08  8:59   ` Michal Orzel
2024-03-08  9:06     ` Henry Wang
2024-03-08  9:41       ` Jan Beulich
2024-03-11 18:02   ` Shawn Anastasio
2024-03-08  1:54 ` [PATCH v2 3/5] xen/domctl, tools: Introduce a new domctl to get guest memory map Henry Wang
2024-03-11  9:10   ` Michal Orzel
2024-03-11  9:46     ` Henry Wang
2024-03-11 16:58   ` Jan Beulich
2024-03-12  3:06     ` Henry Wang
2024-03-08  1:54 ` [PATCH v2 4/5] xen/arm: Find unallocated spaces for magic pages of direct-mapped domU Henry Wang
2024-03-11 13:46   ` Michal Orzel
2024-03-11 13:50     ` Michal Orzel
2024-03-12  3:25     ` Henry Wang
2024-03-13 11:09       ` Carlo Nonato
2024-03-08  1:54 ` Henry Wang [this message]
2024-03-11 17:07   ` [PATCH v2 5/5] xen/memory, tools: Make init-dom0less consume XEN_DOMCTL_get_mem_map Jan Beulich
2024-03-12  3:44     ` Henry Wang
2024-03-12  7:34       ` Jan Beulich
2024-03-12  7:36         ` Henry Wang
2024-03-29  5:11     ` Henry Wang
2024-04-02  7:05       ` Jan Beulich
2024-04-02  8:43         ` Henry Wang
2024-04-02  8:51           ` Jan Beulich
2024-04-02  9:03             ` Henry Wang
2024-03-25 15:35   ` Anthony PERARD
2024-03-26  1:21     ` Henry Wang

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240308015435.4044339-6-xin.wang2@amd.com \
    --to=xin.wang2@amd.com \
    --cc=alec.kwapis@medtronic.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=anthony.perard@citrix.com \
    --cc=george.dunlap@citrix.com \
    --cc=jbeulich@suse.com \
    --cc=julien@xen.org \
    --cc=sstabellini@kernel.org \
    --cc=wl@xen.org \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.