From: Hongyan Xia <hx242@xen.org>
To: xen-devel@lists.xenproject.org
Cc: jgrall@amazon.com, "Jan Beulich" <jbeulich@suse.com>,
"Andrew Cooper" <andrew.cooper3@citrix.com>,
"Roger Pau Monné" <roger.pau@citrix.com>, "Wei Liu" <wl@xen.org>
Subject: [PATCH v9 02/13] x86/mm: switch to new APIs in map_pages_to_xen
Date: Tue, 6 Apr 2021 12:05:50 +0100 [thread overview]
Message-ID: <d179b2d13d1f8fb25ee597cfcfb7a03e6cb87ab4.1617706782.git.hongyxia@amazon.com> (raw)
In-Reply-To: <cover.1617706782.git.hongyxia@amazon.com>
From: Wei Liu <wei.liu2@citrix.com>
Page tables allocated in that function should be mapped and unmapped
now.
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: Hongyan Xia <hongyxia@amazon.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
xen/arch/x86/mm.c | 60 ++++++++++++++++++++++++++++-------------------
1 file changed, 36 insertions(+), 24 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 9705fed195f1..c49e8554f9f7 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5199,7 +5199,7 @@ int map_pages_to_xen(
}
else
{
- l2_pgentry_t *l2t = l3e_to_l2e(ol3e);
+ l2_pgentry_t *l2t = map_l2t_from_l3e(ol3e);
for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
{
@@ -5211,10 +5211,11 @@ int map_pages_to_xen(
else
{
unsigned int j;
- const l1_pgentry_t *l1t = l2e_to_l1e(ol2e);
+ const l1_pgentry_t *l1t = map_l1t_from_l2e(ol2e);
for ( j = 0; j < L1_PAGETABLE_ENTRIES; j++ )
flush_flags(l1e_get_flags(l1t[j]));
+ unmap_domain_page(l1t);
}
}
flush_area(virt, flush_flags);
@@ -5223,9 +5224,10 @@ int map_pages_to_xen(
ol2e = l2t[i];
if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) &&
!(l2e_get_flags(ol2e) & _PAGE_PSE) )
- free_xen_pagetable(l2e_to_l1e(ol2e));
+ free_xen_pagetable_new(l2e_get_mfn(ol2e));
}
- free_xen_pagetable(l2t);
+ unmap_domain_page(l2t);
+ free_xen_pagetable_new(l3e_get_mfn(ol3e));
}
}
@@ -5242,6 +5244,7 @@ int map_pages_to_xen(
unsigned int flush_flags =
FLUSH_TLB | FLUSH_ORDER(2 * PAGETABLE_ORDER);
l2_pgentry_t *l2t;
+ mfn_t l2mfn;
/* Skip this PTE if there is no change. */
if ( ((l3e_get_pfn(ol3e) & ~(L2_PAGETABLE_ENTRIES *
@@ -5263,15 +5266,17 @@ int map_pages_to_xen(
continue;
}
- l2t = alloc_xen_pagetable();
- if ( l2t == NULL )
+ l2mfn = alloc_xen_pagetable_new();
+ if ( mfn_eq(l2mfn, INVALID_MFN) )
goto out;
+ l2t = map_domain_page(l2mfn);
for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
l2e_write(l2t + i,
l2e_from_pfn(l3e_get_pfn(ol3e) +
(i << PAGETABLE_ORDER),
l3e_get_flags(ol3e)));
+ UNMAP_DOMAIN_PAGE(l2t);
if ( l3e_get_flags(ol3e) & _PAGE_GLOBAL )
flush_flags |= FLUSH_TLB_GLOBAL;
@@ -5281,15 +5286,15 @@ int map_pages_to_xen(
if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) &&
(l3e_get_flags(*pl3e) & _PAGE_PSE) )
{
- l3e_write_atomic(pl3e, l3e_from_mfn(virt_to_mfn(l2t),
- __PAGE_HYPERVISOR));
- l2t = NULL;
+ l3e_write_atomic(pl3e,
+ l3e_from_mfn(l2mfn, __PAGE_HYPERVISOR));
+ l2mfn = INVALID_MFN;
}
if ( locking )
spin_unlock(&map_pgdir_lock);
flush_area(virt, flush_flags);
- if ( l2t )
- free_xen_pagetable(l2t);
+
+ free_xen_pagetable_new(l2mfn);
}
pl2e = virt_to_xen_l2e(virt);
@@ -5317,12 +5322,13 @@ int map_pages_to_xen(
}
else
{
- l1_pgentry_t *l1t = l2e_to_l1e(ol2e);
+ l1_pgentry_t *l1t = map_l1t_from_l2e(ol2e);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
flush_flags(l1e_get_flags(l1t[i]));
flush_area(virt, flush_flags);
- free_xen_pagetable(l1t);
+ unmap_domain_page(l1t);
+ free_xen_pagetable_new(l2e_get_mfn(ol2e));
}
}
@@ -5347,6 +5353,7 @@ int map_pages_to_xen(
unsigned int flush_flags =
FLUSH_TLB | FLUSH_ORDER(PAGETABLE_ORDER);
l1_pgentry_t *l1t;
+ mfn_t l1mfn;
/* Skip this PTE if there is no change. */
if ( (((l2e_get_pfn(*pl2e) & ~(L1_PAGETABLE_ENTRIES - 1)) +
@@ -5366,14 +5373,16 @@ int map_pages_to_xen(
goto check_l3;
}
- l1t = alloc_xen_pagetable();
- if ( l1t == NULL )
+ l1mfn = alloc_xen_pagetable_new();
+ if ( mfn_eq(l1mfn, INVALID_MFN) )
goto out;
+ l1t = map_domain_page(l1mfn);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
l1e_write(&l1t[i],
l1e_from_pfn(l2e_get_pfn(*pl2e) + i,
lNf_to_l1f(l2e_get_flags(*pl2e))));
+ UNMAP_DOMAIN_PAGE(l1t);
if ( l2e_get_flags(*pl2e) & _PAGE_GLOBAL )
flush_flags |= FLUSH_TLB_GLOBAL;
@@ -5383,20 +5392,21 @@ int map_pages_to_xen(
if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) &&
(l2e_get_flags(*pl2e) & _PAGE_PSE) )
{
- l2e_write_atomic(pl2e, l2e_from_mfn(virt_to_mfn(l1t),
+ l2e_write_atomic(pl2e, l2e_from_mfn(l1mfn,
__PAGE_HYPERVISOR));
- l1t = NULL;
+ l1mfn = INVALID_MFN;
}
if ( locking )
spin_unlock(&map_pgdir_lock);
flush_area(virt, flush_flags);
- if ( l1t )
- free_xen_pagetable(l1t);
+
+ free_xen_pagetable_new(l1mfn);
}
- pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(virt);
+ pl1e = map_l1t_from_l2e(*pl2e) + l1_table_offset(virt);
ol1e = *pl1e;
l1e_write_atomic(pl1e, l1e_from_mfn(mfn, flags));
+ UNMAP_DOMAIN_PAGE(pl1e);
if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
{
unsigned int flush_flags = FLUSH_TLB | FLUSH_ORDER(0);
@@ -5440,12 +5450,13 @@ int map_pages_to_xen(
goto check_l3;
}
- l1t = l2e_to_l1e(ol2e);
+ l1t = map_l1t_from_l2e(ol2e);
base_mfn = l1e_get_pfn(l1t[0]) & ~(L1_PAGETABLE_ENTRIES - 1);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
if ( (l1e_get_pfn(l1t[i]) != (base_mfn + i)) ||
(l1e_get_flags(l1t[i]) != flags) )
break;
+ UNMAP_DOMAIN_PAGE(l1t);
if ( i == L1_PAGETABLE_ENTRIES )
{
l2e_write_atomic(pl2e, l2e_from_pfn(base_mfn,
@@ -5455,7 +5466,7 @@ int map_pages_to_xen(
flush_area(virt - PAGE_SIZE,
FLUSH_TLB_GLOBAL |
FLUSH_ORDER(PAGETABLE_ORDER));
- free_xen_pagetable(l2e_to_l1e(ol2e));
+ free_xen_pagetable_new(l2e_get_mfn(ol2e));
}
else if ( locking )
spin_unlock(&map_pgdir_lock);
@@ -5488,7 +5499,7 @@ int map_pages_to_xen(
continue;
}
- l2t = l3e_to_l2e(ol3e);
+ l2t = map_l2t_from_l3e(ol3e);
base_mfn = l2e_get_pfn(l2t[0]) & ~(L2_PAGETABLE_ENTRIES *
L1_PAGETABLE_ENTRIES - 1);
for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
@@ -5496,6 +5507,7 @@ int map_pages_to_xen(
(base_mfn + (i << PAGETABLE_ORDER))) ||
(l2e_get_flags(l2t[i]) != l1f_to_lNf(flags)) )
break;
+ UNMAP_DOMAIN_PAGE(l2t);
if ( i == L2_PAGETABLE_ENTRIES )
{
l3e_write_atomic(pl3e, l3e_from_pfn(base_mfn,
@@ -5505,7 +5517,7 @@ int map_pages_to_xen(
flush_area(virt - PAGE_SIZE,
FLUSH_TLB_GLOBAL |
FLUSH_ORDER(2*PAGETABLE_ORDER));
- free_xen_pagetable(l3e_to_l2e(ol3e));
+ free_xen_pagetable_new(l3e_get_mfn(ol3e));
}
else if ( locking )
spin_unlock(&map_pgdir_lock);
--
2.23.3
next prev parent reply other threads:[~2021-04-06 11:06 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-04-06 11:05 [PATCH v9 00/13] switch to domheap for Xen page tables Hongyan Xia
2021-04-06 11:05 ` [PATCH v9 01/13] x86/mm: rewrite virt_to_xen_l*e Hongyan Xia
2021-04-20 12:17 ` Jan Beulich
2021-04-21 11:33 ` Hongyan Xia
2021-04-21 11:39 ` Jan Beulich
2021-04-06 11:05 ` Hongyan Xia [this message]
2021-04-06 11:05 ` [PATCH v9 03/13] x86/mm: switch to new APIs in modify_xen_mappings Hongyan Xia
2021-04-06 11:05 ` [PATCH v9 04/13] x86_64/mm: introduce pl2e in paging_init Hongyan Xia
2021-04-06 11:05 ` [PATCH v9 05/13] x86_64/mm: switch to new APIs " Hongyan Xia
2021-04-06 11:05 ` [PATCH v9 06/13] x86_64/mm: switch to new APIs in setup_m2p_table Hongyan Xia
2021-04-06 11:05 ` [PATCH v9 07/13] efi: use new page table APIs in copy_mapping Hongyan Xia
2021-04-06 11:05 ` [PATCH v9 08/13] efi: switch to new APIs in EFI code Hongyan Xia
2021-04-06 11:05 ` [PATCH v9 09/13] x86/smpboot: add exit path for clone_mapping() Hongyan Xia
2021-04-20 12:29 ` Jan Beulich
2021-04-06 11:05 ` [PATCH v9 10/13] x86/smpboot: switch clone_mapping() to new APIs Hongyan Xia
2021-04-20 12:32 ` Jan Beulich
2021-04-21 13:39 ` Hongyan Xia
2021-04-06 11:05 ` [PATCH v9 11/13] x86/mm: drop old page table APIs Hongyan Xia
2021-04-06 11:06 ` [PATCH v9 12/13] x86: switch to use domheap page for page tables Hongyan Xia
2021-04-06 11:06 ` [PATCH v9 13/13] x86/mm: drop _new suffix for page table APIs Hongyan Xia
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=d179b2d13d1f8fb25ee597cfcfb7a03e6cb87ab4.1617706782.git.hongyxia@amazon.com \
--to=hx242@xen.org \
--cc=andrew.cooper3@citrix.com \
--cc=jbeulich@suse.com \
--cc=jgrall@amazon.com \
--cc=roger.pau@citrix.com \
--cc=wl@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).