From mboxrd@z Thu Jan 1 00:00:00 1970 From: Ed White Subject: [PATCH 04/11] x86/MM: Improve p2m type checks. Date: Fri, 9 Jan 2015 13:26:34 -0800 Message-ID: <1420838801-11704-5-git-send-email-edmund.h.white@intel.com> References: <1420838801-11704-1-git-send-email-edmund.h.white@intel.com> Mime-Version: 1.0 Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Return-path: In-Reply-To: <1420838801-11704-1-git-send-email-edmund.h.white@intel.com> List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Sender: xen-devel-bounces@lists.xen.org Errors-To: xen-devel-bounces@lists.xen.org To: xen-devel@lists.xen.org Cc: keir@xen.org, ian.campbell@citrix.com, tim@xen.org, ian.jackson@eu.citrix.com, Ed White , jbeulich@suse.com List-Id: xen-devel@lists.xenproject.org The alternate p2m code will introduce a new p2m type. In preparation for using that new type, introduce the type indicator here and fix all the checks that assume !nestedp2m == hostp2m to explicitly check for hostp2m. 
Signed-off-by: Ed White --- xen/arch/x86/hvm/hvm.c | 2 +- xen/arch/x86/mm/guest_walk.c | 2 +- xen/arch/x86/mm/hap/guest_walk.c | 4 ++-- xen/arch/x86/mm/p2m-ept.c | 4 ++-- xen/arch/x86/mm/p2m.c | 9 +++++---- xen/include/asm-x86/p2m.h | 7 ++++++- 6 files changed, 17 insertions(+), 11 deletions(-) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 3a7367c..b89e9d2 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -2861,7 +2861,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, /* Mem sharing: unshare the page and try again */ if ( npfec.write_access && (p2mt == p2m_ram_shared) ) { - ASSERT(!p2m_is_nestedp2m(p2m)); + ASSERT(p2m_is_hostp2m(p2m)); sharing_enomem = (mem_sharing_unshare_page(p2m->domain, gfn, 0) < 0); rc = 1; diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c index 1b26175..d8f5a35 100644 --- a/xen/arch/x86/mm/guest_walk.c +++ b/xen/arch/x86/mm/guest_walk.c @@ -99,7 +99,7 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn, q); if ( p2m_is_paging(*p2mt) ) { - ASSERT(!p2m_is_nestedp2m(p2m)); + ASSERT(p2m_is_hostp2m(p2m)); if ( page ) put_page(page); p2m_mem_paging_populate(p2m->domain, gfn_x(gfn)); diff --git a/xen/arch/x86/mm/hap/guest_walk.c b/xen/arch/x86/mm/hap/guest_walk.c index 25d9792..381a196 100644 --- a/xen/arch/x86/mm/hap/guest_walk.c +++ b/xen/arch/x86/mm/hap/guest_walk.c @@ -64,7 +64,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)( &p2mt, NULL, P2M_ALLOC | P2M_UNSHARE); if ( p2m_is_paging(p2mt) ) { - ASSERT(!p2m_is_nestedp2m(p2m)); + ASSERT(p2m_is_hostp2m(p2m)); pfec[0] = PFEC_page_paged; if ( top_page ) put_page(top_page); @@ -106,7 +106,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)( put_page(page); if ( p2m_is_paging(p2mt) ) { - ASSERT(!p2m_is_nestedp2m(p2m)); + ASSERT(p2m_is_hostp2m(p2m)); pfec[0] = PFEC_page_paged; p2m_mem_paging_populate(p2m->domain, gfn_x(gfn)); return INVALID_GFN; diff --git a/xen/arch/x86/mm/p2m-ept.c 
b/xen/arch/x86/mm/p2m-ept.c index 2b9f07c..255b681 100644 --- a/xen/arch/x86/mm/p2m-ept.c +++ b/xen/arch/x86/mm/p2m-ept.c @@ -787,8 +787,8 @@ out: if ( needs_sync != sync_off ) ept_sync_domain(p2m); - /* For non-nested p2m, may need to change VT-d page table.*/ - if ( rc == 0 && !p2m_is_nestedp2m(p2m) && need_iommu(d) && + /* For host p2m, may need to change VT-d page table.*/ + if ( rc == 0 && p2m_is_hostp2m(p2m) && need_iommu(d) && need_modify_vtd_table ) { if ( iommu_hap_pt_share ) diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c index efa49dd..49b66fb 100644 --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -73,6 +73,7 @@ static int p2m_initialise(struct domain *d, struct p2m_domain *p2m) p2m->default_access = p2m_access_rwx; p2m->np2m_base = P2M_BASE_EADDR; + p2m->alternate = 0; if ( hap_enabled(d) && cpu_has_vmx ) ret = ept_p2m_init(p2m); @@ -202,7 +203,7 @@ int p2m_init(struct domain *d) int p2m_is_logdirty_range(struct p2m_domain *p2m, unsigned long start, unsigned long end) { - ASSERT(!p2m_is_nestedp2m(p2m)); + ASSERT(p2m_is_hostp2m(p2m)); if ( p2m->global_logdirty || rangeset_contains_range(p2m->logdirty_ranges, start, end) ) return 1; @@ -263,7 +264,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn, if ( (q & P2M_UNSHARE) && p2m_is_shared(*t) ) { - ASSERT(!p2m_is_nestedp2m(p2m)); + ASSERT(p2m_is_hostp2m(p2m)); /* Try to unshare. If we fail, communicate ENOMEM without * sleeping. 
*/ if ( mem_sharing_unshare_page(p2m->domain, gfn, 0) < 0 ) @@ -431,7 +432,7 @@ int p2m_alloc_table(struct p2m_domain *p2m) p2m_lock(p2m); - if ( !p2m_is_nestedp2m(p2m) + if ( p2m_is_hostp2m(p2m) && !page_list_empty(&d->page_list) ) { P2M_ERROR("dom %d already has memory allocated\n", d->domain_id); @@ -1708,7 +1709,7 @@ p2m_flush_table(struct p2m_domain *p2m) /* "Host" p2m tables can have shared entries &c that need a bit more * care when discarding them */ - ASSERT(p2m_is_nestedp2m(p2m)); + ASSERT(!p2m_is_hostp2m(p2m)); /* Nested p2m's do not do pod, hence the asserts (and no pod lock)*/ ASSERT(page_list_empty(&p2m->pod.super)); ASSERT(page_list_empty(&p2m->pod.single)); diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h index 5f7fe71..8193901 100644 --- a/xen/include/asm-x86/p2m.h +++ b/xen/include/asm-x86/p2m.h @@ -193,6 +193,9 @@ struct p2m_domain { * threaded on in LRU order. */ struct list_head np2m_list; + /* Does this p2m belong to the altp2m code? */ + bool_t alternate; + /* Host p2m: Log-dirty ranges registered for the domain. */ struct rangeset *logdirty_ranges; @@ -290,7 +293,9 @@ struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v, uint64_t np2m_base); */ struct p2m_domain *p2m_get_p2m(struct vcpu *v); -#define p2m_is_nestedp2m(p2m) ((p2m) != p2m_get_hostp2m((p2m->domain))) +#define p2m_is_hostp2m(p2m) ((p2m) == p2m_get_hostp2m((p2m->domain))) +#define p2m_is_altp2m(p2m) ((p2m)->alternate) +#define p2m_is_nestedp2m(p2m) (!p2m_is_altp2m(p2m) && !p2m_is_hostp2m(p2m)) #define p2m_get_pagetable(p2m) ((p2m)->phys_table) -- 1.9.1