From: "Jan Beulich"
Subject: [PATCH 07/11] x86: properly use map_domain_page() during page table manipulation
Date: Tue, 22 Jan 2013 10:55:15 +0000
Message-ID: <50FE7E2302000078000B8345@nat28.tlf.novell.com>
In-Reply-To: <50FE7BF502000078000B82F8@nat28.tlf.novell.com>
References: <50FE7BF502000078000B82F8@nat28.tlf.novell.com>
To: xen-devel
List-Id: xen-devel@lists.xenproject.org

Signed-off-by: Jan Beulich

--- a/xen/arch/x86/debug.c
+++ b/xen/arch/x86/debug.c
@@ -98,8 +98,9 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct doma
 
     if ( pgd3val == 0 )
     {
-        l4t = mfn_to_virt(mfn);
+        l4t = map_domain_page(mfn);
         l4e = l4t[l4_table_offset(vaddr)];
+        unmap_domain_page(l4t);
         mfn = l4e_get_pfn(l4e);
         DBGP2("l4t:%p l4to:%lx l4e:%lx mfn:%lx\n", l4t,
               l4_table_offset(vaddr), l4e, mfn);
@@ -109,20 +110,23 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct doma
             return INVALID_MFN;
         }
 
-        l3t = mfn_to_virt(mfn);
+        l3t = map_domain_page(mfn);
         l3e = l3t[l3_table_offset(vaddr)];
+        unmap_domain_page(l3t);
         mfn = l3e_get_pfn(l3e);
         DBGP2("l3t:%p l3to:%lx l3e:%lx mfn:%lx\n", l3t,
               l3_table_offset(vaddr), l3e, mfn);
-        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
+             (l3e_get_flags(l3e) & _PAGE_PSE) )
         {
             DBGP1("l3 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
             return INVALID_MFN;
         }
     }
 
-    l2t = mfn_to_virt(mfn);
+    l2t = map_domain_page(mfn);
     l2e = l2t[l2_table_offset(vaddr)];
+    unmap_domain_page(l2t);
     mfn = l2e_get_pfn(l2e);
     DBGP2("l2t:%p l2to:%lx l2e:%lx mfn:%lx\n", l2t, l2_table_offset(vaddr),
           l2e, mfn);
@@ -132,8 +136,9 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct doma
         DBGP1("l2 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
         return INVALID_MFN;
     }
-    l1t = mfn_to_virt(mfn);
+    l1t = map_domain_page(mfn);
     l1e = l1t[l1_table_offset(vaddr)];
+    unmap_domain_page(l1t);
     mfn = l1e_get_pfn(l1e);
     DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%lx\n", l1t, l1_table_offset(vaddr),
           l1e, mfn);
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1331,7 +1331,7 @@ static int alloc_l4_table(struct page_in
 {
     struct domain *d = page_get_owner(page);
     unsigned long pfn = page_to_mfn(page);
-    l4_pgentry_t *pl4e = page_to_virt(page);
+    l4_pgentry_t *pl4e = map_domain_page(pfn);
     unsigned int i;
     int rc = 0, partial = page->partial_pte;
 
@@ -1365,12 +1365,16 @@ static int alloc_l4_table(struct page_in
                 put_page_from_l4e(pl4e[i], pfn, 0, 0);
         }
         if ( rc < 0 )
+        {
+            unmap_domain_page(pl4e);
             return rc;
+        }
 
         adjust_guest_l4e(pl4e[i], d);
     }
 
     init_guest_l4_table(pl4e, d);
+    unmap_domain_page(pl4e);
 
     return rc > 0 ? 0 : rc;
 }
@@ -1464,7 +1468,7 @@ static int free_l4_table(struct page_inf
 {
     struct domain *d = page_get_owner(page);
     unsigned long pfn = page_to_mfn(page);
-    l4_pgentry_t *pl4e = page_to_virt(page);
+    l4_pgentry_t *pl4e = map_domain_page(pfn);
     int rc = 0, partial = page->partial_pte;
     unsigned int i = page->nr_validated_ptes - !partial;
 
@@ -1487,6 +1491,9 @@ static int free_l4_table(struct page_inf
         page->partial_pte = 0;
         rc = -EAGAIN;
     }
+
+    unmap_domain_page(pl4e);
+
     return rc > 0 ? 0 : rc;
 }
 
@@ -4983,15 +4990,23 @@ int mmio_ro_do_page_fault(struct vcpu *v
     return rc != X86EMUL_UNHANDLEABLE ? EXCRET_fault_fixed : 0;
 }
 
-void free_xen_pagetable(void *v)
+void *alloc_xen_pagetable(void)
 {
-    if ( system_state == SYS_STATE_early_boot )
-        return;
+    if ( system_state != SYS_STATE_early_boot )
+    {
+        void *ptr = alloc_xenheap_page();
 
-    if ( is_xen_heap_page(virt_to_page(v)) )
+        BUG_ON(!dom0 && !ptr);
+        return ptr;
+    }
+
+    return mfn_to_virt(alloc_boot_pages(1, 1));
+}
+
+void free_xen_pagetable(void *v)
+{
+    if ( system_state != SYS_STATE_early_boot )
         free_xenheap_page(v);
-    else
-        free_domheap_page(virt_to_page(v));
 }
 
 /* Convert to from superpage-mapping flags for map_pages_to_xen(). */
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -180,6 +180,11 @@ static void show_guest_stack(struct vcpu
         printk(" %p", _p(addr));
         stack++;
     }
+    if ( mask == PAGE_SIZE )
+    {
+        BUILD_BUG_ON(PAGE_SIZE == STACK_SIZE);
+        unmap_domain_page(stack);
+    }
     if ( i == 0 )
         printk("Stack empty.");
     printk("\n");
--- a/xen/arch/x86/x86_64/compat/traps.c
+++ b/xen/arch/x86/x86_64/compat/traps.c
@@ -56,6 +56,11 @@ void compat_show_guest_stack(struct vcpu
         printk(" %08x", addr);
         stack++;
     }
+    if ( mask == PAGE_SIZE )
+    {
+        BUILD_BUG_ON(PAGE_SIZE == STACK_SIZE);
+        unmap_domain_page(stack);
+    }
     if ( i == 0 )
         printk("Stack empty.");
     printk("\n");
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -65,22 +65,6 @@ int __mfn_valid(unsigned long mfn)
                            pdx_group_valid));
 }
 
-void *alloc_xen_pagetable(void)
-{
-    unsigned long mfn;
-
-    if ( system_state != SYS_STATE_early_boot )
-    {
-        struct page_info *pg = alloc_domheap_page(NULL, 0);
-
-        BUG_ON(!dom0 && !pg);
-        return pg ? page_to_virt(pg) : NULL;
-    }
-
-    mfn = alloc_boot_pages(1, 1);
-    return mfn_to_virt(mfn);
-}
-
 l3_pgentry_t *virt_to_xen_l3e(unsigned long v)
 {
     l4_pgentry_t *pl4e;
@@ -154,35 +138,45 @@ void *do_page_walk(struct vcpu *v, unsig
     if ( is_hvm_vcpu(v) )
         return NULL;
 
-    l4t = mfn_to_virt(mfn);
+    l4t = map_domain_page(mfn);
     l4e = l4t[l4_table_offset(addr)];
-    mfn = l4e_get_pfn(l4e);
+    unmap_domain_page(l4t);
     if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
         return NULL;
 
-    l3t = mfn_to_virt(mfn);
+    l3t = map_l3t_from_l4e(l4e);
     l3e = l3t[l3_table_offset(addr)];
+    unmap_domain_page(l3t);
     mfn = l3e_get_pfn(l3e);
     if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
         return NULL;
     if ( (l3e_get_flags(l3e) & _PAGE_PSE) )
-        return mfn_to_virt(mfn) + (addr & ((1UL << L3_PAGETABLE_SHIFT) - 1));
+    {
+        mfn += PFN_DOWN(addr & ((1UL << L3_PAGETABLE_SHIFT) - 1));
+        goto ret;
+    }
 
-    l2t = mfn_to_virt(mfn);
+    l2t = map_domain_page(mfn);
     l2e = l2t[l2_table_offset(addr)];
+    unmap_domain_page(l2t);
     mfn = l2e_get_pfn(l2e);
     if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
         return NULL;
     if ( (l2e_get_flags(l2e) & _PAGE_PSE) )
-        return mfn_to_virt(mfn) + (addr & ((1UL << L2_PAGETABLE_SHIFT) - 1));
+    {
+        mfn += PFN_DOWN(addr & ((1UL << L2_PAGETABLE_SHIFT) - 1));
+        goto ret;
+    }
 
-    l1t = mfn_to_virt(mfn);
+    l1t = map_domain_page(mfn);
     l1e = l1t[l1_table_offset(addr)];
+    unmap_domain_page(l1t);
     mfn = l1e_get_pfn(l1e);
     if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
         return NULL;
 
-    return mfn_to_virt(mfn) + (addr & ~PAGE_MASK);
+ ret:
+    return map_domain_page(mfn) + (addr & ~PAGE_MASK);
 }
 
 void __init pfn_pdx_hole_setup(unsigned long mask)
@@ -519,10 +513,9 @@ static int setup_compat_m2p_table(struct
 static int setup_m2p_table(struct mem_hotadd_info *info)
 {
     unsigned long i, va, smap, emap;
-    unsigned int n, memflags;
+    unsigned int n;
     l2_pgentry_t *l2_ro_mpt = NULL;
     l3_pgentry_t *l3_ro_mpt = NULL;
-    struct page_info *l2_pg;
     int ret = 0;
 
     ASSERT(l4e_get_flags(idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)])
@@ -560,7 +553,6 @@ static int setup_m2p_table(struct mem_ho
         }
 
         va = RO_MPT_VIRT_START + i * sizeof(*machine_to_phys_mapping);
-        memflags = MEMF_node(phys_to_nid(i << PAGE_SHIFT));
 
         for ( n = 0; n < CNT; ++n)
             if ( mfn_valid(i + n * PDX_GROUP_COUNT) )
@@ -587,19 +579,18 @@ static int setup_m2p_table(struct mem_ho
                             l2_table_offset(va);
             else
             {
-                l2_pg = alloc_domheap_page(NULL, memflags);
-
-                if (!l2_pg)
+                l2_ro_mpt = alloc_xen_pagetable();
+                if ( !l2_ro_mpt )
                 {
                     ret = -ENOMEM;
                     goto error;
                 }
 
-                l2_ro_mpt = page_to_virt(l2_pg);
                 clear_page(l2_ro_mpt);
                 l3e_write(&l3_ro_mpt[l3_table_offset(va)],
-                          l3e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER));
-                l2_ro_mpt += l2_table_offset(va);
+                          l3e_from_paddr(__pa(l2_ro_mpt),
+                                         __PAGE_HYPERVISOR | _PAGE_USER));
+                l2_ro_mpt += l2_table_offset(va);
             }
 
             /* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
@@ -762,12 +753,12 @@ void __init paging_init(void)
               l4_table_offset(HIRO_COMPAT_MPT_VIRT_START));
     l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(
         HIRO_COMPAT_MPT_VIRT_START)]);
-    if ( (l2_pg = alloc_domheap_page(NULL, 0)) == NULL )
+    if ( (l2_ro_mpt = alloc_xen_pagetable()) == NULL )
         goto nomem;
-    compat_idle_pg_table_l2 = l2_ro_mpt = page_to_virt(l2_pg);
+    compat_idle_pg_table_l2 = l2_ro_mpt;
     clear_page(l2_ro_mpt);
     l3e_write(&l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
-              l3e_from_page(l2_pg, __PAGE_HYPERVISOR));
+              l3e_from_paddr(__pa(l2_ro_mpt), __PAGE_HYPERVISOR));
     l2_ro_mpt += l2_table_offset(HIRO_COMPAT_MPT_VIRT_START);
     /* Allocate and map the compatibility mode machine-to-phys table. */
     mpt_size = (mpt_size >> 1) + (1UL << (L2_PAGETABLE_SHIFT - 1));
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -175,8 +175,9 @@ void show_page_walk(unsigned long addr)
 
     printk("Pagetable walk from %016lx:\n", addr);
 
-    l4t = mfn_to_virt(mfn);
+    l4t = map_domain_page(mfn);
     l4e = l4t[l4_table_offset(addr)];
+    unmap_domain_page(l4t);
     mfn = l4e_get_pfn(l4e);
     pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
           get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
@@ -186,8 +187,9 @@ void show_page_walk(unsigned long addr)
          !mfn_valid(mfn) )
         return;
 
-    l3t = mfn_to_virt(mfn);
+    l3t = map_domain_page(mfn);
     l3e = l3t[l3_table_offset(addr)];
+    unmap_domain_page(l3t);
     mfn = l3e_get_pfn(l3e);
     pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
           get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
@@ -199,8 +201,9 @@ void show_page_walk(unsigned long addr)
          !mfn_valid(mfn) )
         return;
 
-    l2t = mfn_to_virt(mfn);
+    l2t = map_domain_page(mfn);
     l2e = l2t[l2_table_offset(addr)];
+    unmap_domain_page(l2t);
     mfn = l2e_get_pfn(l2e);
     pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
           get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
@@ -212,8 +215,9 @@ void show_page_walk(unsigned long addr)
          !mfn_valid(mfn) )
         return;
 
-    l1t = mfn_to_virt(mfn);
+    l1t = map_domain_page(mfn);
     l1e = l1t[l1_table_offset(addr)];
+    unmap_domain_page(l1t);
     mfn = l1e_get_pfn(l1e);
     pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
           get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -172,6 +172,10 @@ static inline l4_pgentry_t l4e_from_padd
 #define l3e_to_l2e(x)              ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
 #define l4e_to_l3e(x)              ((l3_pgentry_t *)__va(l4e_get_paddr(x)))
 
+#define map_l1t_from_l2e(x)        ((l1_pgentry_t *)map_domain_page(l2e_get_pfn(x)))
+#define map_l2t_from_l3e(x)        ((l2_pgentry_t *)map_domain_page(l3e_get_pfn(x)))
+#define map_l3t_from_l4e(x)        ((l3_pgentry_t *)map_domain_page(l4e_get_pfn(x)))
+
 /* Given a virtual address, get an entry offset into a page table. */
 #define l1_table_offset(a)         \
     (((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
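
Note on the new page.h helpers (illustrative only, not part of the patch): the
pattern the patch establishes is to create a transient mapping of a page table
with map_domain_page() -- or one of the new map_lNt_from_lMe() wrappers --
copy out the single entry of interest, and unmap again before acting on that
entry. A minimal sketch, assuming the caller has already read a PV guest's L4
entry covering 'addr' and checked it for _PAGE_PRESENT (the helper name below
is hypothetical):

    /* Illustrative sketch only: look up the L3 entry for 'addr'. */
    static unsigned long sketch_l3_lookup(l4_pgentry_t l4e, unsigned long addr)
    {
        l3_pgentry_t *l3t = map_l3t_from_l4e(l4e);      /* transient mapping */
        l3_pgentry_t l3e = l3t[l3_table_offset(addr)];  /* copy the one entry */

        unmap_domain_page(l3t);                         /* drop mapping before use */

        return (l3e_get_flags(l3e) & _PAGE_PRESENT) ? l3e_get_pfn(l3e)
                                                    : INVALID_MFN;
    }

The same shape applies one level down via map_l2t_from_l3e() and
map_l1t_from_l2e().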
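
A related note on the alloc_xen_pagetable() rework (again illustrative only):
with the function moved to mm.c and switched to alloc_xenheap_page(), the
pointer it returns is a directly usable Xen virtual address both at early boot
and at runtime, so callers no longer go through page_to_virt() on a domheap
page. A sketch of the intended caller pattern, modelled on the paging_init()
and setup_m2p_table() hunks above (the function name is hypothetical):

    /* Illustrative sketch only: allocate and hook up an empty L2 table. */
    static int sketch_install_empty_l2(l3_pgentry_t *l3e_slot)
    {
        l2_pgentry_t *l2t = alloc_xen_pagetable();

        if ( !l2t )
            return -ENOMEM;

        clear_page(l2t);   /* freshly allocated table: all entries not present */
        l3e_write(l3e_slot, l3e_from_paddr(__pa(l2t), __PAGE_HYPERVISOR));

        return 0;
    }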