From: Hongyan Xia <hx242@xen.org>
To: xen-devel@lists.xenproject.org
Cc: "Andrew Cooper" <andrew.cooper3@citrix.com>,
jgrall@amazon.com, "Wei Liu" <wl@xen.org>,
"Jan Beulich" <jbeulich@suse.com>,
"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [PATCH v8 05/15] x86/mm: switch to new APIs in modify_xen_mappings
Date: Mon, 27 Jul 2020 15:21:55 +0100 [thread overview]
Message-ID: <d6e921a4a33b0be1ae8147de268854556b08a3bc.1595857947.git.hongyxia@amazon.com> (raw)
In-Reply-To: <cover.1595857947.git.hongyxia@amazon.com>
From: Wei Liu <wei.liu2@citrix.com>
Page tables allocated in that function should be mapped and unmapped
now.
Note that pl2e may now be mapped and unmapped in different iterations, so
we need to add clean-ups for that.
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: Hongyan Xia <hongyxia@amazon.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
Changed in v7:
- use normal unmap in the error path.
---
xen/arch/x86/mm.c | 57 +++++++++++++++++++++++++++++++++++--------------------
1 file changed, 36 insertions(+), 21 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index edcf164742..199940a345 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5527,7 +5527,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
{
bool locking = system_state > SYS_STATE_boot;
l3_pgentry_t *pl3e = NULL;
- l2_pgentry_t *pl2e;
+ l2_pgentry_t *pl2e = NULL;
l1_pgentry_t *pl1e;
unsigned int i;
unsigned long v = s;
@@ -5543,6 +5543,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
while ( v < e )
{
/* Clean up mappings mapped in the previous iteration. */
+ UNMAP_DOMAIN_PAGE(pl2e);
UNMAP_DOMAIN_PAGE(pl3e);
pl3e = virt_to_xen_l3e(v);
@@ -5560,6 +5561,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
if ( l3e_get_flags(*pl3e) & _PAGE_PSE )
{
l2_pgentry_t *l2t;
+ mfn_t l2mfn;
if ( l2_table_offset(v) == 0 &&
l1_table_offset(v) == 0 &&
@@ -5576,35 +5578,38 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
}
/* PAGE1GB: shatter the superpage and fall through. */
- l2t = alloc_xen_pagetable();
- if ( !l2t )
+ l2mfn = alloc_xen_pagetable_new();
+ if ( mfn_eq(l2mfn, INVALID_MFN) )
goto out;
+ l2t = map_domain_page(l2mfn);
for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
l2e_write(l2t + i,
l2e_from_pfn(l3e_get_pfn(*pl3e) +
(i << PAGETABLE_ORDER),
l3e_get_flags(*pl3e)));
+ UNMAP_DOMAIN_PAGE(l2t);
+
if ( locking )
spin_lock(&map_pgdir_lock);
if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) &&
(l3e_get_flags(*pl3e) & _PAGE_PSE) )
{
- l3e_write_atomic(pl3e, l3e_from_mfn(virt_to_mfn(l2t),
- __PAGE_HYPERVISOR));
- l2t = NULL;
+ l3e_write_atomic(pl3e,
+ l3e_from_mfn(l2mfn, __PAGE_HYPERVISOR));
+ l2mfn = INVALID_MFN;
}
if ( locking )
spin_unlock(&map_pgdir_lock);
- if ( l2t )
- free_xen_pagetable(l2t);
+
+ free_xen_pagetable_new(l2mfn);
}
/*
* The L3 entry has been verified to be present, and we've dealt with
* 1G pages as well, so the L2 table cannot require allocation.
*/
- pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(v);
+ pl2e = map_l2t_from_l3e(*pl3e) + l2_table_offset(v);
if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
{
@@ -5632,41 +5637,45 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
else
{
l1_pgentry_t *l1t;
-
/* PSE: shatter the superpage and try again. */
- l1t = alloc_xen_pagetable();
- if ( !l1t )
+ mfn_t l1mfn = alloc_xen_pagetable_new();
+
+ if ( mfn_eq(l1mfn, INVALID_MFN) )
goto out;
+ l1t = map_domain_page(l1mfn);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
l1e_write(&l1t[i],
l1e_from_pfn(l2e_get_pfn(*pl2e) + i,
l2e_get_flags(*pl2e) & ~_PAGE_PSE));
+ UNMAP_DOMAIN_PAGE(l1t);
+
if ( locking )
spin_lock(&map_pgdir_lock);
if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) &&
(l2e_get_flags(*pl2e) & _PAGE_PSE) )
{
- l2e_write_atomic(pl2e, l2e_from_mfn(virt_to_mfn(l1t),
+ l2e_write_atomic(pl2e, l2e_from_mfn(l1mfn,
__PAGE_HYPERVISOR));
- l1t = NULL;
+ l1mfn = INVALID_MFN;
}
if ( locking )
spin_unlock(&map_pgdir_lock);
- if ( l1t )
- free_xen_pagetable(l1t);
+
+ free_xen_pagetable_new(l1mfn);
}
}
else
{
l1_pgentry_t nl1e, *l1t;
+ mfn_t l1mfn;
/*
* Ordinary 4kB mapping: The L2 entry has been verified to be
* present, and we've dealt with 2M pages as well, so the L1 table
* cannot require allocation.
*/
- pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(v);
+ pl1e = map_l1t_from_l2e(*pl2e) + l1_table_offset(v);
/* Confirm the caller isn't trying to create new mappings. */
if ( !(l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
@@ -5677,6 +5686,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
(l1e_get_flags(*pl1e) & ~FLAGS_MASK) | nf);
l1e_write_atomic(pl1e, nl1e);
+ UNMAP_DOMAIN_PAGE(pl1e);
v += PAGE_SIZE;
/*
@@ -5706,10 +5716,12 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
continue;
}
- l1t = l2e_to_l1e(*pl2e);
+ l1mfn = l2e_get_mfn(*pl2e);
+ l1t = map_domain_page(l1mfn);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
if ( l1e_get_intpte(l1t[i]) != 0 )
break;
+ UNMAP_DOMAIN_PAGE(l1t);
if ( i == L1_PAGETABLE_ENTRIES )
{
/* Empty: zap the L2E and free the L1 page. */
@@ -5717,7 +5729,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
if ( locking )
spin_unlock(&map_pgdir_lock);
flush_area(NULL, FLUSH_TLB_GLOBAL); /* flush before free */
- free_xen_pagetable(l1t);
+ free_xen_pagetable_new(l1mfn);
}
else if ( locking )
spin_unlock(&map_pgdir_lock);
@@ -5748,11 +5760,13 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
{
l2_pgentry_t *l2t;
+ mfn_t l2mfn = l3e_get_mfn(*pl3e);
- l2t = l3e_to_l2e(*pl3e);
+ l2t = map_domain_page(l2mfn);
for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
if ( l2e_get_intpte(l2t[i]) != 0 )
break;
+ UNMAP_DOMAIN_PAGE(l2t);
if ( i == L2_PAGETABLE_ENTRIES )
{
/* Empty: zap the L3E and free the L2 page. */
@@ -5760,7 +5774,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
if ( locking )
spin_unlock(&map_pgdir_lock);
flush_area(NULL, FLUSH_TLB_GLOBAL); /* flush before free */
- free_xen_pagetable(l2t);
+ free_xen_pagetable_new(l2mfn);
}
else if ( locking )
spin_unlock(&map_pgdir_lock);
@@ -5773,6 +5787,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
rc = 0;
out:
+ unmap_domain_page(pl2e);
unmap_domain_page(pl3e);
return rc;
}
--
2.16.6
next prev parent reply other threads:[~2020-07-27 14:22 UTC|newest]
Thread overview: 31+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-07-27 14:21 [PATCH v8 00/15] switch to domheap for Xen page tables Hongyan Xia
2020-07-27 14:21 ` [PATCH v8 01/15] x86/mm: map_pages_to_xen would better have one exit path Hongyan Xia
2020-07-27 14:21 ` [PATCH v8 02/15] x86/mm: make sure there is one exit path for modify_xen_mappings Hongyan Xia
2020-07-27 14:21 ` [PATCH v8 03/15] x86/mm: rewrite virt_to_xen_l*e Hongyan Xia
2020-08-07 14:05 ` Jan Beulich
2020-08-13 16:08 ` Hongyan Xia
2020-08-13 17:22 ` Julien Grall
2020-08-18 8:49 ` Jan Beulich
2020-08-18 10:13 ` Julien Grall
2020-08-18 11:30 ` Jan Beulich
2020-08-18 13:08 ` Julien Grall
2020-08-18 16:16 ` Jan Beulich
2020-11-30 12:13 ` Hongyan Xia
2020-11-30 12:50 ` Jan Beulich
2020-11-30 14:13 ` Hongyan Xia
2020-11-30 14:47 ` Jan Beulich
2020-12-07 15:28 ` Hongyan Xia
2020-07-27 14:21 ` [PATCH v8 04/15] x86/mm: switch to new APIs in map_pages_to_xen Hongyan Xia
2020-07-27 14:21 ` Hongyan Xia [this message]
2020-07-27 14:21 ` [PATCH v8 06/15] x86_64/mm: introduce pl2e in paging_init Hongyan Xia
2020-07-27 14:21 ` [PATCH v8 07/15] x86_64/mm: switch to new APIs " Hongyan Xia
2020-08-07 14:09 ` Jan Beulich
2020-07-27 14:21 ` [PATCH v8 08/15] x86_64/mm: switch to new APIs in setup_m2p_table Hongyan Xia
2020-07-27 14:21 ` [PATCH v8 09/15] efi: use new page table APIs in copy_mapping Hongyan Xia
2020-08-07 14:13 ` Jan Beulich
2020-07-27 14:22 ` [PATCH v8 10/15] efi: switch to new APIs in EFI code Hongyan Xia
2020-07-27 14:22 ` [PATCH v8 11/15] x86/smpboot: add exit path for clone_mapping() Hongyan Xia
2020-07-27 14:22 ` [PATCH v8 12/15] x86/smpboot: switch clone_mapping() to new APIs Hongyan Xia
2020-07-27 14:22 ` [PATCH v8 13/15] x86/mm: drop old page table APIs Hongyan Xia
2020-07-27 14:22 ` [PATCH v8 14/15] x86: switch to use domheap page for page tables Hongyan Xia
2020-07-27 14:22 ` [PATCH v8 15/15] x86/mm: drop _new suffix for page table APIs Hongyan Xia
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=d6e921a4a33b0be1ae8147de268854556b08a3bc.1595857947.git.hongyxia@amazon.com \
--to=hx242@xen.org \
--cc=andrew.cooper3@citrix.com \
--cc=jbeulich@suse.com \
--cc=jgrall@amazon.com \
--cc=roger.pau@citrix.com \
--cc=wl@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).