* [Xen-devel] [PATCH] x86/mm: switch to new APIs in arch_init_memory
@ 2020-02-21 10:42 Hongyan Xia
2020-02-21 13:05 ` Julien Grall
2020-02-26 11:56 ` Jan Beulich
0 siblings, 2 replies; 3+ messages in thread
From: Hongyan Xia @ 2020-02-21 10:42 UTC (permalink / raw)
To: xen-devel
Cc: Stefano Stabellini, Julien Grall, Wei Liu, Konrad Rzeszutek Wilk,
George Dunlap, Andrew Cooper, Ian Jackson, Hongyan Xia,
Jan Beulich, Roger Pau Monné
From: Wei Liu <wei.liu2@citrix.com>
Since we now map and unmap Xen PTE pages, we would like to track the
lifetime of mappings so that 1) we do not dereference memory through a
variable after it is unmapped, 2) we do not unmap more than once.
Therefore, we introduce the UNMAP_DOMAIN_PAGE macro to nullify the
variable after unmapping, and ignore NULL in unmap_domain_page.
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: Hongyan Xia <hongyxia@amazon.com>
---
xen/arch/x86/domain_page.c | 2 +-
xen/arch/x86/mm.c | 14 ++++++++------
xen/include/xen/domain_page.h | 5 +++++
3 files changed, 14 insertions(+), 7 deletions(-)
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index dd32712d2f..b03728e18e 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -181,7 +181,7 @@ void unmap_domain_page(const void *ptr)
unsigned long va = (unsigned long)ptr, mfn, flags;
struct vcpu_maphash_entry *hashent;
- if ( va >= DIRECTMAP_VIRT_START )
+ if ( !va || va >= DIRECTMAP_VIRT_START )
return;
ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 70b87c4830..9fcdcde5b7 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -356,19 +356,21 @@ void __init arch_init_memory(void)
ASSERT(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS);
if ( l4_table_offset(split_va) == l4_table_offset(split_va - 1) )
{
- l3_pgentry_t *l3tab = alloc_xen_pagetable();
+ mfn_t l3mfn = alloc_xen_pagetable_new();
- if ( l3tab )
+ if ( !mfn_eq(l3mfn, INVALID_MFN) )
{
- const l3_pgentry_t *l3idle =
- l4e_to_l3e(idle_pg_table[l4_table_offset(split_va)]);
+ l3_pgentry_t *l3idle = map_l3t_from_l4e(
+ idle_pg_table[l4_table_offset(split_va)]);
+ l3_pgentry_t *l3tab = map_domain_page(l3mfn);
for ( i = 0; i < l3_table_offset(split_va); ++i )
l3tab[i] = l3idle[i];
for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
l3tab[i] = l3e_empty();
- split_l4e = l4e_from_mfn(virt_to_mfn(l3tab),
- __PAGE_HYPERVISOR_RW);
+ split_l4e = l4e_from_mfn(l3mfn, __PAGE_HYPERVISOR_RW);
+ UNMAP_DOMAIN_PAGE(l3idle);
+ UNMAP_DOMAIN_PAGE(l3tab);
}
else
++root_pgt_pv_xen_slots;
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index 32669a3339..a182d33b67 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -72,4 +72,9 @@ static inline void unmap_domain_page_global(const void *va) {};
#endif /* !CONFIG_DOMAIN_PAGE */
+#define UNMAP_DOMAIN_PAGE(p) do { \
+ unmap_domain_page(p); \
+ (p) = NULL; \
+} while ( false )
+
#endif /* __XEN_DOMAIN_PAGE_H__ */
--
2.17.1
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [Xen-devel] [PATCH] x86/mm: switch to new APIs in arch_init_memory
2020-02-21 10:42 [Xen-devel] [PATCH] x86/mm: switch to new APIs in arch_init_memory Hongyan Xia
@ 2020-02-21 13:05 ` Julien Grall
2020-02-26 11:56 ` Jan Beulich
1 sibling, 0 replies; 3+ messages in thread
From: Julien Grall @ 2020-02-21 13:05 UTC (permalink / raw)
To: Hongyan Xia, xen-devel
Cc: Stefano Stabellini, Wei Liu, Konrad Rzeszutek Wilk,
George Dunlap, Andrew Cooper, Ian Jackson, Jan Beulich,
Roger Pau Monné
Hi Hongyan,
On 21/02/2020 10:42, Hongyan Xia wrote:
> From: Wei Liu <wei.liu2@citrix.com>
>
> Since we now map and unmap Xen PTE pages, we would like to track the
> lifetime of mappings so that 1) we do not dereference memory through a
> variable after it is unmapped, 2) we do not unmap more than once.
> Therefore, we introduce the UNMAP_DOMAIN_PAGE macro to nullify the
> variable after unmapping, and ignore NULL in unmap_domain_page.
If we want to support NULL in unmap_domain_page() then we would need to
replicate the change for Arm as well.
Cheers,
>
> Signed-off-by: Wei Liu <wei.liu2@citrix.com>
> Signed-off-by: Hongyan Xia <hongyxia@amazon.com>
> ---
> xen/arch/x86/domain_page.c | 2 +-
> xen/arch/x86/mm.c | 14 ++++++++------
> xen/include/xen/domain_page.h | 5 +++++
> 3 files changed, 14 insertions(+), 7 deletions(-)
>
> diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
> index dd32712d2f..b03728e18e 100644
> --- a/xen/arch/x86/domain_page.c
> +++ b/xen/arch/x86/domain_page.c
> @@ -181,7 +181,7 @@ void unmap_domain_page(const void *ptr)
> unsigned long va = (unsigned long)ptr, mfn, flags;
> struct vcpu_maphash_entry *hashent;
>
> - if ( va >= DIRECTMAP_VIRT_START )
> + if ( !va || va >= DIRECTMAP_VIRT_START )
> return;
>
> ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 70b87c4830..9fcdcde5b7 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -356,19 +356,21 @@ void __init arch_init_memory(void)
> ASSERT(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS);
> if ( l4_table_offset(split_va) == l4_table_offset(split_va - 1) )
> {
> - l3_pgentry_t *l3tab = alloc_xen_pagetable();
> + mfn_t l3mfn = alloc_xen_pagetable_new();
>
> - if ( l3tab )
> + if ( !mfn_eq(l3mfn, INVALID_MFN) )
> {
> - const l3_pgentry_t *l3idle =
> - l4e_to_l3e(idle_pg_table[l4_table_offset(split_va)]);
> + l3_pgentry_t *l3idle = map_l3t_from_l4e(
> + idle_pg_table[l4_table_offset(split_va)]);
> + l3_pgentry_t *l3tab = map_domain_page(l3mfn);
>
> for ( i = 0; i < l3_table_offset(split_va); ++i )
> l3tab[i] = l3idle[i];
> for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
> l3tab[i] = l3e_empty();
> - split_l4e = l4e_from_mfn(virt_to_mfn(l3tab),
> - __PAGE_HYPERVISOR_RW);
> + split_l4e = l4e_from_mfn(l3mfn, __PAGE_HYPERVISOR_RW);
> + UNMAP_DOMAIN_PAGE(l3idle);
> + UNMAP_DOMAIN_PAGE(l3tab);
> }
> else
> ++root_pgt_pv_xen_slots;
> diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
> index 32669a3339..a182d33b67 100644
> --- a/xen/include/xen/domain_page.h
> +++ b/xen/include/xen/domain_page.h
> @@ -72,4 +72,9 @@ static inline void unmap_domain_page_global(const void *va) {};
>
> #endif /* !CONFIG_DOMAIN_PAGE */
>
> +#define UNMAP_DOMAIN_PAGE(p) do { \
> + unmap_domain_page(p); \
> + (p) = NULL; \
> +} while ( false )
> +
> #endif /* __XEN_DOMAIN_PAGE_H__ */
>
--
Julien Grall
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [Xen-devel] [PATCH] x86/mm: switch to new APIs in arch_init_memory
2020-02-21 10:42 [Xen-devel] [PATCH] x86/mm: switch to new APIs in arch_init_memory Hongyan Xia
2020-02-21 13:05 ` Julien Grall
@ 2020-02-26 11:56 ` Jan Beulich
1 sibling, 0 replies; 3+ messages in thread
From: Jan Beulich @ 2020-02-26 11:56 UTC (permalink / raw)
To: Hongyan Xia
Cc: Stefano Stabellini, Julien Grall, Wei Liu, Konrad Rzeszutek Wilk,
George Dunlap, Andrew Cooper, Ian Jackson, xen-devel,
Roger Pau Monné
On 21.02.2020 11:42, Hongyan Xia wrote:
> From: Wei Liu <wei.liu2@citrix.com>
>
> Since we now map and unmap Xen PTE pages, we would like to track the
> lifetime of mappings so that 1) we do not dereference memory through a
> variable after it is unmapped, 2) we do not unmap more than once.
> Therefore, we introduce the UNMAP_DOMAIN_PAGE macro to nullify the
> variable after unmapping, and ignore NULL in unmap_domain_page.
To me this reads as if it were a 2nd paragraph explaining what needs
doing in order to achieve the main goal of the patch (supposedly
described in a 1st paragraph).
> --- a/xen/arch/x86/domain_page.c
> +++ b/xen/arch/x86/domain_page.c
> @@ -181,7 +181,7 @@ void unmap_domain_page(const void *ptr)
> unsigned long va = (unsigned long)ptr, mfn, flags;
> struct vcpu_maphash_entry *hashent;
>
> - if ( va >= DIRECTMAP_VIRT_START )
> + if ( !va || va >= DIRECTMAP_VIRT_START )
> return;
If we are to go with this, then I agree with Julien that this needs
mirroring to Arm, allowing common code to assume this behavior.
However, an alternative to this is to make ...
> --- a/xen/include/xen/domain_page.h
> +++ b/xen/include/xen/domain_page.h
> @@ -72,4 +72,9 @@ static inline void unmap_domain_page_global(const void *va) {};
>
> #endif /* !CONFIG_DOMAIN_PAGE */
>
> +#define UNMAP_DOMAIN_PAGE(p) do { \
> + unmap_domain_page(p); \
> + (p) = NULL; \
> +} while ( false )
... this avoid the call, leaving the function itself untouched.
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2020-02-26 11:56 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-02-21 10:42 [Xen-devel] [PATCH] x86/mm: switch to new APIs in arch_init_memory Hongyan Xia
2020-02-21 13:05 ` Julien Grall
2020-02-26 11:56 ` Jan Beulich
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).