* [PATCH] x86/mm: Use static inlines for {, un}adjust_guest_l?e()
@ 2017-09-04 10:02 Andrew Cooper
2017-09-04 10:33 ` Jan Beulich
2017-09-04 10:43 ` Wei Liu
0 siblings, 2 replies; 4+ messages in thread
From: Andrew Cooper @ 2017-09-04 10:02 UTC (permalink / raw)
To: Xen-devel; +Cc: Andrew Cooper, Wei Liu, Jan Beulich
There is no need for these to be macros, and the result is easier to read.
No functional change, but bloat-o-meter reports the following improvement:
add/remove: 1/0 grow/shrink: 2/3 up/down: 235/-427 (-192)
function old new delta
__get_page_type 5231 5351 +120
adjust_guest_l1e.isra - 96 +96
free_page_type 1540 1559 +19
ptwr_emulated_update 1008 957 -51
create_grant_pv_mapping 1342 1186 -156
mod_l1_entry 1892 1672 -220
adjust_guest_l1e(), now a single compiler-visible unit, is chosen for
out-of-lining from its several call sites. The other helpers remain inline.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wei.liu2@citrix.com>
---
xen/arch/x86/mm.c | 137 +++++++++++++++++++++++++++++-------------------------
1 file changed, 74 insertions(+), 63 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e5b0cce..b82cec4 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1219,52 +1219,63 @@ get_page_from_l4e(
return rc;
}
-#define adjust_guest_l1e(pl1e, d) \
- do { \
- if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) && \
- likely(!is_pv_32bit_domain(d)) ) \
- { \
- /* _PAGE_GUEST_KERNEL page cannot have the Global bit set. */ \
- if ( (l1e_get_flags((pl1e)) & (_PAGE_GUEST_KERNEL|_PAGE_GLOBAL)) \
- == (_PAGE_GUEST_KERNEL|_PAGE_GLOBAL) ) \
- gdprintk(XENLOG_WARNING, \
- "Global bit is set to kernel page %lx\n", \
- l1e_get_pfn((pl1e))); \
- if ( !(l1e_get_flags((pl1e)) & _PAGE_USER) ) \
- l1e_add_flags((pl1e), (_PAGE_GUEST_KERNEL|_PAGE_USER)); \
- if ( !(l1e_get_flags((pl1e)) & _PAGE_GUEST_KERNEL) ) \
- l1e_add_flags((pl1e), (_PAGE_GLOBAL|_PAGE_USER)); \
- } \
- } while ( 0 )
-
-#define adjust_guest_l2e(pl2e, d) \
- do { \
- if ( likely(l2e_get_flags((pl2e)) & _PAGE_PRESENT) && \
- likely(!is_pv_32bit_domain(d)) ) \
- l2e_add_flags((pl2e), _PAGE_USER); \
- } while ( 0 )
-
-#define adjust_guest_l3e(pl3e, d) \
- do { \
- if ( likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) ) \
- l3e_add_flags((pl3e), likely(!is_pv_32bit_domain(d)) ? \
- _PAGE_USER : \
- _PAGE_USER|_PAGE_RW); \
- } while ( 0 )
-
-#define adjust_guest_l4e(pl4e, d) \
- do { \
- if ( likely(l4e_get_flags((pl4e)) & _PAGE_PRESENT) && \
- likely(!is_pv_32bit_domain(d)) ) \
- l4e_add_flags((pl4e), _PAGE_USER); \
- } while ( 0 )
-
-#define unadjust_guest_l3e(pl3e, d) \
- do { \
- if ( unlikely(is_pv_32bit_domain(d)) && \
- likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) ) \
- l3e_remove_flags((pl3e), _PAGE_USER|_PAGE_RW|_PAGE_ACCESSED); \
- } while ( 0 )
+static l1_pgentry_t adjust_guest_l1e(l1_pgentry_t l1e, const struct domain *d)
+{
+ if ( likely(l1e_get_flags(l1e) & _PAGE_PRESENT) &&
+ likely(!is_pv_32bit_domain(d)) )
+ {
+ /* _PAGE_GUEST_KERNEL page cannot have the Global bit set. */
+ if ( (l1e_get_flags(l1e) & (_PAGE_GUEST_KERNEL | _PAGE_GLOBAL)) ==
+ (_PAGE_GUEST_KERNEL | _PAGE_GLOBAL) )
+ gdprintk(XENLOG_WARNING,
+ "Global bit is set in kernel page %lx\n",
+ l1e_get_pfn(l1e));
+
+ if ( !(l1e_get_flags(l1e) & _PAGE_USER) )
+ l1e_add_flags(l1e, (_PAGE_GUEST_KERNEL | _PAGE_USER));
+
+ if ( !(l1e_get_flags(l1e) & _PAGE_GUEST_KERNEL) )
+ l1e_add_flags(l1e, (_PAGE_GLOBAL | _PAGE_USER));
+ }
+
+ return l1e;
+}
+
+static l2_pgentry_t adjust_guest_l2e(l2_pgentry_t l2e, const struct domain *d)
+{
+ if ( likely(l2e_get_flags(l2e) & _PAGE_PRESENT) &&
+ likely(!is_pv_32bit_domain(d)) )
+ l2e_add_flags(l2e, _PAGE_USER);
+
+ return l2e;
+}
+
+static l3_pgentry_t adjust_guest_l3e(l3_pgentry_t l3e, const struct domain *d)
+{
+ if ( likely(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+ l3e_add_flags(l3e, (likely(!is_pv_32bit_domain(d))
+ ? _PAGE_USER : _PAGE_USER | _PAGE_RW));
+
+ return l3e;
+}
+
+static l3_pgentry_t unadjust_guest_l3e(l3_pgentry_t l3e, const struct domain *d)
+{
+ if ( unlikely(is_pv_32bit_domain(d)) &&
+ likely(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+ l3e_remove_flags(l3e, _PAGE_USER | _PAGE_RW | _PAGE_ACCESSED);
+
+ return l3e;
+}
+
+static l4_pgentry_t adjust_guest_l4e(l4_pgentry_t l4e, const struct domain *d)
+{
+ if ( likely(l4e_get_flags(l4e) & _PAGE_PRESENT) &&
+ likely(!is_pv_32bit_domain(d)) )
+ l4e_add_flags(l4e, _PAGE_USER);
+
+ return l4e;
+}
void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
{
@@ -1436,7 +1447,7 @@ static int alloc_l1_table(struct page_info *page)
break;
}
- adjust_guest_l1e(pl1e[i], d);
+ pl1e[i] = adjust_guest_l1e(pl1e[i], d);
}
unmap_domain_page(pl1e);
@@ -1525,7 +1536,7 @@ static int alloc_l2_table(struct page_info *page, unsigned long type,
break;
}
- adjust_guest_l2e(pl2e[i], d);
+ pl2e[i] = adjust_guest_l2e(pl2e[i], d);
}
if ( rc >= 0 && (type & PGT_pae_xen_l2) )
@@ -1591,7 +1602,7 @@ static int alloc_l3_table(struct page_info *page)
if ( rc < 0 )
break;
- adjust_guest_l3e(pl3e[i], d);
+ pl3e[i] = adjust_guest_l3e(pl3e[i], d);
}
if ( rc >= 0 && !create_pae_xen_mappings(d, pl3e) )
@@ -1606,7 +1617,7 @@ static int alloc_l3_table(struct page_info *page)
current->arch.old_guest_table = page;
}
while ( i-- > 0 )
- unadjust_guest_l3e(pl3e[i], d);
+ pl3e[i] = unadjust_guest_l3e(pl3e[i], d);
}
unmap_domain_page(pl3e);
@@ -1716,7 +1727,7 @@ static int alloc_l4_table(struct page_info *page)
return rc;
}
- adjust_guest_l4e(pl4e[i], d);
+ pl4e[i] = adjust_guest_l4e(pl4e[i], d);
}
if ( rc >= 0 )
@@ -1791,7 +1802,7 @@ static int free_l3_table(struct page_info *page)
partial = 0;
if ( rc > 0 )
continue;
- unadjust_guest_l3e(pl3e[i], d);
+ pl3e[i] = unadjust_guest_l3e(pl3e[i], d);
} while ( i-- );
unmap_domain_page(pl3e);
@@ -1978,7 +1989,7 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
/* Fast path for sufficiently-similar mappings. */
if ( !l1e_has_changed(ol1e, nl1e, ~FASTPATH_FLAG_WHITELIST) )
{
- adjust_guest_l1e(nl1e, pt_dom);
+ nl1e = adjust_guest_l1e(nl1e, pt_dom);
rc = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
preserve_ad);
if ( page )
@@ -2003,7 +2014,7 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
if ( page )
put_page(page);
- adjust_guest_l1e(nl1e, pt_dom);
+ nl1e = adjust_guest_l1e(nl1e, pt_dom);
if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
preserve_ad)) )
{
@@ -2057,7 +2068,7 @@ static int mod_l2_entry(l2_pgentry_t *pl2e,
/* Fast path for sufficiently-similar mappings. */
if ( !l2e_has_changed(ol2e, nl2e, ~FASTPATH_FLAG_WHITELIST) )
{
- adjust_guest_l2e(nl2e, d);
+ nl2e = adjust_guest_l2e(nl2e, d);
if ( UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, preserve_ad) )
return 0;
return -EBUSY;
@@ -2066,7 +2077,7 @@ static int mod_l2_entry(l2_pgentry_t *pl2e,
if ( unlikely((rc = get_page_from_l2e(nl2e, pfn, d)) < 0) )
return rc;
- adjust_guest_l2e(nl2e, d);
+ nl2e = adjust_guest_l2e(nl2e, d);
if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu,
preserve_ad)) )
{
@@ -2117,7 +2128,7 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
/* Fast path for sufficiently-similar mappings. */
if ( !l3e_has_changed(ol3e, nl3e, ~FASTPATH_FLAG_WHITELIST) )
{
- adjust_guest_l3e(nl3e, d);
+ nl3e = adjust_guest_l3e(nl3e, d);
rc = UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, vcpu, preserve_ad);
return rc ? 0 : -EFAULT;
}
@@ -2127,7 +2138,7 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
return rc;
rc = 0;
- adjust_guest_l3e(nl3e, d);
+ nl3e = adjust_guest_l3e(nl3e, d);
if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, vcpu,
preserve_ad)) )
{
@@ -2182,7 +2193,7 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
/* Fast path for sufficiently-similar mappings. */
if ( !l4e_has_changed(ol4e, nl4e, ~FASTPATH_FLAG_WHITELIST) )
{
- adjust_guest_l4e(nl4e, d);
+ nl4e = adjust_guest_l4e(nl4e, d);
rc = UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu, preserve_ad);
return rc ? 0 : -EFAULT;
}
@@ -2192,7 +2203,7 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
return rc;
rc = 0;
- adjust_guest_l4e(nl4e, d);
+ nl4e = adjust_guest_l4e(nl4e, d);
if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu,
preserve_ad)) )
{
@@ -3824,7 +3835,7 @@ static int create_grant_pte_mapping(
if ( !IS_ALIGNED(pte_addr, sizeof(nl1e)) )
return GNTST_general_error;
- adjust_guest_l1e(nl1e, d);
+ nl1e = adjust_guest_l1e(nl1e, d);
gmfn = pte_addr >> PAGE_SHIFT;
page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
@@ -3959,7 +3970,7 @@ static int create_grant_va_mapping(
struct page_info *l1pg;
int okay;
- adjust_guest_l1e(nl1e, d);
+ nl1e = adjust_guest_l1e(nl1e, d);
pl1e = guest_map_l1e(va, &gl1mfn);
if ( !pl1e )
@@ -5084,7 +5095,7 @@ static int ptwr_emulated_update(
break;
}
- adjust_guest_l1e(nl1e, d);
+ nl1e = adjust_guest_l1e(nl1e, d);
/* Checked successfully: do the update (write or cmpxchg). */
pl1e = map_domain_page(_mfn(mfn));
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH] x86/mm: Use static inlines for {, un}adjust_guest_l?e()
2017-09-04 10:02 [PATCH] x86/mm: Use static inlines for {, un}adjust_guest_l?e() Andrew Cooper
@ 2017-09-04 10:33 ` Jan Beulich
2017-09-04 12:13 ` Andrew Cooper
2017-09-04 10:43 ` Wei Liu
1 sibling, 1 reply; 4+ messages in thread
From: Jan Beulich @ 2017-09-04 10:33 UTC (permalink / raw)
To: Andrew Cooper; +Cc: Wei Liu, Xen-devel
>>> On 04.09.17 at 12:02, <andrew.cooper3@citrix.com> wrote:
> +static l1_pgentry_t adjust_guest_l1e(l1_pgentry_t l1e, const struct domain *d)
> +{
> + if ( likely(l1e_get_flags(l1e) & _PAGE_PRESENT) &&
> + likely(!is_pv_32bit_domain(d)) )
> + {
> + /* _PAGE_GUEST_KERNEL page cannot have the Global bit set. */
> + if ( (l1e_get_flags(l1e) & (_PAGE_GUEST_KERNEL | _PAGE_GLOBAL)) ==
> + (_PAGE_GUEST_KERNEL | _PAGE_GLOBAL) )
> + gdprintk(XENLOG_WARNING,
> + "Global bit is set in kernel page %lx\n",
Looks like this could be a single line now.
> +static l4_pgentry_t adjust_guest_l4e(l4_pgentry_t l4e, const struct domain *d)
> +{
> + if ( likely(l4e_get_flags(l4e) & _PAGE_PRESENT) &&
> + likely(!is_pv_32bit_domain(d)) )
Would it be reasonable to move this 2nd condition out of the if()
into an ASSERT()? With or without that adjustment
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] x86/mm: Use static inlines for {, un}adjust_guest_l?e()
2017-09-04 10:02 [PATCH] x86/mm: Use static inlines for {, un}adjust_guest_l?e() Andrew Cooper
2017-09-04 10:33 ` Jan Beulich
@ 2017-09-04 10:43 ` Wei Liu
1 sibling, 0 replies; 4+ messages in thread
From: Wei Liu @ 2017-09-04 10:43 UTC (permalink / raw)
To: Andrew Cooper; +Cc: Wei Liu, Jan Beulich, Xen-devel
On Mon, Sep 04, 2017 at 11:02:53AM +0100, Andrew Cooper wrote:
> There is no need for these to be macros, and the result is easier to read.
>
> No functional change, but bloat-o-meter reports the following improvement:
>
> add/remove: 1/0 grow/shrink: 2/3 up/down: 235/-427 (-192)
> function old new delta
> __get_page_type 5231 5351 +120
> adjust_guest_l1e.isra - 96 +96
> free_page_type 1540 1559 +19
> ptwr_emulated_update 1008 957 -51
> create_grant_pv_mapping 1342 1186 -156
> mod_l1_entry 1892 1672 -220
>
> adjust_guest_l1e(), now being a compiler-visible single unit, is chosen for
> out-of-line'ing from its several callsites. The other helpers remain inline.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] x86/mm: Use static inlines for {, un}adjust_guest_l?e()
2017-09-04 10:33 ` Jan Beulich
@ 2017-09-04 12:13 ` Andrew Cooper
0 siblings, 0 replies; 4+ messages in thread
From: Andrew Cooper @ 2017-09-04 12:13 UTC (permalink / raw)
To: Jan Beulich; +Cc: Wei Liu, Xen-devel
On 04/09/17 11:33, Jan Beulich wrote:
>>>> On 04.09.17 at 12:02, <andrew.cooper3@citrix.com> wrote:
>> +static l1_pgentry_t adjust_guest_l1e(l1_pgentry_t l1e, const struct domain *d)
>> +{
>> + if ( likely(l1e_get_flags(l1e) & _PAGE_PRESENT) &&
>> + likely(!is_pv_32bit_domain(d)) )
>> + {
>> + /* _PAGE_GUEST_KERNEL page cannot have the Global bit set. */
>> + if ( (l1e_get_flags(l1e) & (_PAGE_GUEST_KERNEL | _PAGE_GLOBAL)) ==
>> + (_PAGE_GUEST_KERNEL | _PAGE_GLOBAL) )
>> + gdprintk(XENLOG_WARNING,
>> + "Global bit is set in kernel page %lx\n",
> Looks like this could be a single line now.
So it can.
>
>> +static l4_pgentry_t adjust_guest_l4e(l4_pgentry_t l4e, const struct domain *d)
>> +{
>> + if ( likely(l4e_get_flags(l4e) & _PAGE_PRESENT) &&
>> + likely(!is_pv_32bit_domain(d)) )
> Would it be reasonable to move this 2nd condition out of the if()
> into an ASSERT()? With or without that adjustment
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
I'll do a separate patch clarifying the correctness of 32bit PV guests
in a number of places.
~Andrew
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2017-09-04 12:13 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-09-04 10:02 [PATCH] x86/mm: Use static inlines for {, un}adjust_guest_l?e() Andrew Cooper
2017-09-04 10:33 ` Jan Beulich
2017-09-04 12:13 ` Andrew Cooper
2017-09-04 10:43 ` Wei Liu
This is an external index of several public inboxes;
see the mirroring instructions on how to clone and mirror
all data and code used by this external index.