From: Julien Grall <julien@xen.org>
To: xen-devel@lists.xenproject.org
Cc: julien@xen.org, "Hongyan Xia" <hongyxia@amazon.com>,
"Jan Beulich" <jbeulich@suse.com>,
"Andrew Cooper" <andrew.cooper3@citrix.com>,
"Roger Pau Monné" <roger.pau@citrix.com>, "Wei Liu" <wl@xen.org>,
"Julien Grall" <jgrall@amazon.com>
Subject: [PATCH 08/22] x86/pv: rewrite how building PV dom0 handles domheap mappings
Date: Fri, 16 Dec 2022 11:48:39 +0000
Message-ID: <20221216114853.8227-9-julien@xen.org>
In-Reply-To: <20221216114853.8227-1-julien@xen.org>
From: Hongyan Xia <hongyxia@amazon.com>
Building a PV dom0 allocates its page-table pages from the domheap, but
then accesses them through __va() as if they had come from the xenheap.
This is clearly wrong once domheap pages can no longer be assumed to be
covered by the directmap. Fix it by mapping and unmapping the pages with
map_domain_page() as they are used.
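To illustrate the difference (a minimal sketch, not part of the patch:
touch_pt_page() is a made-up helper, only the map_domain_page() /
unmap_domain_page() / maddr_to_mfn() calls are the existing interfaces):

    /*
     * Illustrative only: xenheap-style access assumes the page is
     * permanently mapped; domheap-style access must map it first.
     */
    static void __init touch_pt_page(paddr_t maddr)
    {
        /* xenheap-style -- relies on the page being in the directmap: */
        /* clear_page(__va(maddr)); */

        /* domheap-style -- map, use, unmap: */
        void *pt = map_domain_page(maddr_to_mfn(maddr));

        clear_page(pt);
        unmap_domain_page(pt);
    }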
Signed-off-by: Hongyan Xia <hongyxia@amazon.com>
Signed-off-by: Julien Grall <jgrall@amazon.com>
----
Changes since Hongyan's version:
* Rebase
* Remove spurious newline
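
For reviewers, a sketch of the usage pattern the new
UNMAP_MAP_AND_ADVANCE() helper (introduced below) is meant to follow.
The variable names mirror the ones in dom0_construct_pv(), but the
snippet itself is only illustrative:

    mfn_t l2start_mfn = INVALID_MFN;
    l2_pgentry_t *l2start = NULL;

    /* Map the page-table page currently at mpt_alloc and advance mpt_alloc. */
    UNMAP_MAP_AND_ADVANCE(l2start_mfn, l2start, mpt_alloc);
    clear_page(l2start);
    /* ... fill in entries through l2start ... */

    /*
     * The next call unmaps the previous page before mapping the new one
     * (UNMAP_DOMAIN_PAGE() is a no-op on NULL), so at most one page per
     * page-table level stays mapped at any time.
     */
    UNMAP_MAP_AND_ADVANCE(l2start_mfn, l2start, mpt_alloc);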
---
xen/arch/x86/pv/dom0_build.c | 56 +++++++++++++++++++++++++++---------
1 file changed, 42 insertions(+), 14 deletions(-)
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index c837b2d96f89..cd60f259d1b7 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -383,6 +383,10 @@ int __init dom0_construct_pv(struct domain *d,
l3_pgentry_t *l3tab = NULL, *l3start = NULL;
l2_pgentry_t *l2tab = NULL, *l2start = NULL;
l1_pgentry_t *l1tab = NULL, *l1start = NULL;
+ mfn_t l4start_mfn = INVALID_MFN;
+ mfn_t l3start_mfn = INVALID_MFN;
+ mfn_t l2start_mfn = INVALID_MFN;
+ mfn_t l1start_mfn = INVALID_MFN;
/*
* This fully describes the memory layout of the initial domain. All
@@ -711,22 +715,32 @@ int __init dom0_construct_pv(struct domain *d,
v->arch.pv.event_callback_cs = FLAT_COMPAT_KERNEL_CS;
}
+#define UNMAP_MAP_AND_ADVANCE(mfn_var, virt_var, maddr) \
+do { \
+ UNMAP_DOMAIN_PAGE(virt_var); \
+ mfn_var = maddr_to_mfn(maddr); \
+ maddr += PAGE_SIZE; \
+ virt_var = map_domain_page(mfn_var); \
+} while ( false )
+
if ( !compat )
{
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
- l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+ UNMAP_MAP_AND_ADVANCE(l4start_mfn, l4start, mpt_alloc);
+ l4tab = l4start;
clear_page(l4tab);
- init_xen_l4_slots(l4tab, _mfn(virt_to_mfn(l4start)),
- d, INVALID_MFN, true);
- v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
+ init_xen_l4_slots(l4tab, l4start_mfn, d, INVALID_MFN, true);
+ v->arch.guest_table = pagetable_from_mfn(l4start_mfn);
}
else
{
/* Monitor table already created by switch_compat(). */
- l4start = l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
+ l4start_mfn = pagetable_get_mfn(v->arch.guest_table);
+ l4start = l4tab = map_domain_page(l4start_mfn);
/* See public/xen.h on why the following is needed. */
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l3_page_table;
- l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+ UNMAP_MAP_AND_ADVANCE(l3start_mfn, l3start, mpt_alloc);
}
l4tab += l4_table_offset(v_start);
@@ -736,14 +750,16 @@ int __init dom0_construct_pv(struct domain *d,
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
{
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l1_page_table;
- l1start = l1tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+ UNMAP_MAP_AND_ADVANCE(l1start_mfn, l1start, mpt_alloc);
+ l1tab = l1start;
clear_page(l1tab);
if ( count == 0 )
l1tab += l1_table_offset(v_start);
if ( !((unsigned long)l2tab & (PAGE_SIZE-1)) )
{
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
- l2start = l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+ UNMAP_MAP_AND_ADVANCE(l2start_mfn, l2start, mpt_alloc);
+ l2tab = l2start;
clear_page(l2tab);
if ( count == 0 )
l2tab += l2_table_offset(v_start);
@@ -753,19 +769,19 @@ int __init dom0_construct_pv(struct domain *d,
{
maddr_to_page(mpt_alloc)->u.inuse.type_info =
PGT_l3_page_table;
- l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+ UNMAP_MAP_AND_ADVANCE(l3start_mfn, l3start, mpt_alloc);
}
l3tab = l3start;
clear_page(l3tab);
if ( count == 0 )
l3tab += l3_table_offset(v_start);
- *l4tab = l4e_from_paddr(__pa(l3start), L4_PROT);
+ *l4tab = l4e_from_mfn(l3start_mfn, L4_PROT);
l4tab++;
}
- *l3tab = l3e_from_paddr(__pa(l2start), L3_PROT);
+ *l3tab = l3e_from_mfn(l2start_mfn, L3_PROT);
l3tab++;
}
- *l2tab = l2e_from_paddr(__pa(l1start), L2_PROT);
+ *l2tab = l2e_from_mfn(l1start_mfn, L2_PROT);
l2tab++;
}
if ( count < initrd_pfn || count >= initrd_pfn + PFN_UP(initrd_len) )
@@ -792,9 +808,9 @@ int __init dom0_construct_pv(struct domain *d,
if ( !l3e_get_intpte(*l3tab) )
{
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
- l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
- clear_page(l2tab);
- *l3tab = l3e_from_paddr(__pa(l2tab), L3_PROT);
+ UNMAP_MAP_AND_ADVANCE(l2start_mfn, l2start, mpt_alloc);
+ clear_page(l2start);
+ *l3tab = l3e_from_mfn(l2start_mfn, L3_PROT);
}
if ( i == 3 )
l3e_get_page(*l3tab)->u.inuse.type_info |= PGT_pae_xen_l2;
@@ -805,9 +821,17 @@ int __init dom0_construct_pv(struct domain *d,
unmap_domain_page(l2t);
}
+#undef UNMAP_MAP_AND_ADVANCE
+
+ UNMAP_DOMAIN_PAGE(l1start);
+ UNMAP_DOMAIN_PAGE(l2start);
+ UNMAP_DOMAIN_PAGE(l3start);
+
/* Pages that are part of page tables must be read only. */
mark_pv_pt_pages_rdonly(d, l4start, vpt_start, nr_pt_pages, &flush_flags);
+ UNMAP_DOMAIN_PAGE(l4start);
+
/* Mask all upcalls... */
for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
shared_info(d, vcpu_info[i].evtchn_upcall_mask) = 1;
@@ -977,8 +1001,12 @@ int __init dom0_construct_pv(struct domain *d,
* !CONFIG_VIDEO case so the logic here can be simplified.
*/
if ( pv_shim )
+ {
+ l4start = map_domain_page(l4start_mfn);
pv_shim_setup_dom(d, l4start, v_start, vxenstore_start, vconsole_start,
vphysmap_start, si);
+ UNMAP_DOMAIN_PAGE(l4start);
+ }
#ifdef CONFIG_COMPAT
if ( compat )
--
2.38.1