From: "Jan Beulich"
Subject: [PATCH 12/11] x86: debugging code for testing 16Tb support on smaller memory systems
Date: Tue, 22 Jan 2013 10:58:53 +0000
Message-ID: <50FE7EFD02000078000B8359@nat28.tlf.novell.com>
In-Reply-To: <50FE7BF502000078000B82F8@nat28.tlf.novell.com>
References: <50FE7BF502000078000B82F8@nat28.tlf.novell.com>
To: xen-devel
List-Id: xen-devel@lists.xenproject.org

DO NOT APPLY AS IS.

This is a debugging aid only: in debug builds it compiles out the
mfn_to_virt() fast paths and adds a "split-gb=" command line option
(minimum 4) which artificially ends the always-mapped 1:1 region at the
given gigabyte boundary, so that the map_domain_page() machinery a 16Tb
system would need gets exercised on small-memory hosts as well.

Signed-off-by: Jan Beulich

--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -66,8 +66,10 @@ void *map_domain_page(unsigned long mfn)
     struct mapcache_vcpu *vcache;
     struct vcpu_maphash_entry *hashent;
 
+#ifdef NDEBUG
     if ( mfn <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
         return mfn_to_virt(mfn);
+#endif
 
     v = mapcache_current_vcpu();
     if ( !v || is_hvm_vcpu(v) )
@@ -139,6 +141,14 @@ void *map_domain_page(unsigned long mfn)
             if ( ++i == MAPHASH_ENTRIES )
                 i = 0;
         } while ( i != MAPHASH_HASHFN(mfn) );
+if(idx >= dcache->entries) {//temp
+ mapcache_domain_dump(v->domain);
+ for(i = 0; i < ARRAY_SIZE(vcache->hash); ++i) {
+  hashent = &vcache->hash[i];
+  if(hashent->idx != MAPHASHENT_NOTINUSE)
+   printk("vc[%u]: ref=%u idx=%04x mfn=%08lx\n", i, hashent->refcnt, hashent->idx, hashent->mfn);
+ }
+}
     }
     BUG_ON(idx >= dcache->entries);
 
@@ -249,8 +259,10 @@ int mapcache_domain_init(struct domain *
     if ( is_hvm_domain(d) || is_idle_domain(d) )
         return 0;
 
+#ifdef NDEBUG
     if ( !mem_hotplug && max_page <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
         return 0;
+#endif
 
     dcache->l1tab = xzalloc_array(l1_pgentry_t *, MAPCACHE_L2_ENTRIES + 1);
     d->arch.perdomain_l2_pg[MAPCACHE_SLOT] = alloc_domheap_page(NULL, memf);
@@ -418,8 +430,10 @@ void *map_domain_page_global(unsigned lo
 
     ASSERT(!in_irq() && local_irq_is_enabled());
 
+#ifdef NDEBUG
     if ( mfn <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
         return mfn_to_virt(mfn);
+#endif
 
     spin_lock(&globalmap_lock);
 
@@ -497,3 +511,26 @@ unsigned long domain_page_map_to_mfn(con
 
     return l1e_get_pfn(*pl1e);
 }
+
+void mapcache_domain_dump(struct domain *d) {//temp
+ unsigned int i, n = 0;
+ const struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
+ const struct vcpu *v;
+ if(is_hvm_domain(d) || is_idle_domain(d))
+  return;
+ for_each_vcpu(d, v) {
+  const struct mapcache_vcpu *vcache = &v->arch.pv_vcpu.mapcache;
+  for(i = 0; i < ARRAY_SIZE(vcache->hash); ++i)
+   n += (vcache->hash[i].idx != MAPHASHENT_NOTINUSE);
+ }
+ printk("Dom%d mc (#=%u v=%u) [%p]:\n", d->domain_id, n, d->max_vcpus, __builtin_return_address(0));
+ for(i = 0; i < BITS_TO_LONGS(dcache->entries); ++i)
+  printk("dcu[%02x]: %016lx\n", i, dcache->inuse[i]);
+ for(i = 0; i < BITS_TO_LONGS(dcache->entries); ++i)
+  printk("dcg[%02x]: %016lx\n", i, dcache->garbage[i]);
+ for(i = 0; i < dcache->entries; ++i) {
+  l1_pgentry_t l1e = DCACHE_L1ENT(dcache, i);
+  if((test_bit(i, dcache->inuse) && !test_bit(i, dcache->garbage)) || (l1e_get_flags(l1e) & _PAGE_PRESENT))
+   printk("dc[%04x]: %"PRIpte"\n", i, l1e_get_intpte(l1e));
+ }
+}
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -250,6 +250,14 @@ void __init init_frametable(void)
         init_spagetable();
 }
 
+#ifndef NDEBUG
+static unsigned int __read_mostly root_pgt_pv_xen_slots
+    = ROOT_PAGETABLE_PV_XEN_SLOTS;
+static l4_pgentry_t __read_mostly split_l4e;
+#else
+#define root_pgt_pv_xen_slots ROOT_PAGETABLE_PV_XEN_SLOTS
+#endif
+
 void __init arch_init_memory(void)
 {
     unsigned long i, pfn, rstart_pfn, rend_pfn, iostart_pfn, ioend_pfn;
@@ -344,6 +352,41 @@ void __init arch_init_memory(void)
     efi_init_memory();
 
     mem_sharing_init();
+
+#ifndef NDEBUG
+    if ( split_gb )
+    {
+        paddr_t split_pa = split_gb * GB(1);
+        unsigned long split_va = (unsigned long)__va(split_pa);
+
+        if ( split_va < HYPERVISOR_VIRT_END &&
+             split_va - 1 == (unsigned long)__va(split_pa - 1) )
+        {
+            root_pgt_pv_xen_slots = l4_table_offset(split_va) -
+                                    ROOT_PAGETABLE_FIRST_XEN_SLOT;
+            ASSERT(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS);
+            if ( l4_table_offset(split_va) == l4_table_offset(split_va - 1) )
+            {
+                l3_pgentry_t *l3tab = alloc_xen_pagetable();
+
+                if ( l3tab )
+                {
+                    const l3_pgentry_t *l3idle =
+                        l4e_to_l3e(idle_pg_table[l4_table_offset(split_va)]);
+
+                    for ( i = 0; i < l3_table_offset(split_va); ++i )
+                        l3tab[i] = l3idle[i];
+                    for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
+                        l3tab[i] = l3e_empty();
+                    split_l4e = l4e_from_pfn(virt_to_mfn(l3tab),
+                                             __PAGE_HYPERVISOR);
+                }
+                else
+                    ++root_pgt_pv_xen_slots;
+            }
+        }
+    }
+#endif
 }
 
 int page_is_ram_type(unsigned long mfn, unsigned long mem_type)
@@ -1320,7 +1363,12 @@ void init_guest_l4_table(l4_pgentry_t l4
     /* Xen private mappings. */
     memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
            &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           ROOT_PAGETABLE_PV_XEN_SLOTS * sizeof(l4_pgentry_t));
+           root_pgt_pv_xen_slots * sizeof(l4_pgentry_t));
+#ifndef NDEBUG
+    if ( l4e_get_intpte(split_l4e) )
+        l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots] =
+            split_l4e;
+#endif
     l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
         l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR);
     l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -82,6 +82,11 @@ boolean_param("noapic", skip_ioapic_setu
 s8 __read_mostly xen_cpuidle = -1;
 boolean_param("cpuidle", xen_cpuidle);
 
+#ifndef NDEBUG
+unsigned int __initdata split_gb;
+integer_param("split-gb", split_gb);
+#endif
+
 cpumask_t __read_mostly cpu_present_map;
 
 unsigned long __read_mostly xen_phys_start;
@@ -789,6 +794,11 @@ void __init __start_xen(unsigned long mb
     modules_headroom = bzimage_headroom(bootstrap_map(mod), mod->mod_end);
     bootstrap_map(NULL);
 
+#ifndef split_gb /* Don't allow split below 4Gb. */
+    if ( split_gb < 4 )
+        split_gb = 0;
+#endif
+
     for ( i = boot_e820.nr_map-1; i >= 0; i-- )
     {
         uint64_t s, e, mask = (1UL << L2_PAGETABLE_SHIFT) - 1;
@@ -917,6 +927,9 @@ void __init __start_xen(unsigned long mb
             /* Don't overlap with other modules. */
             end = consider_modules(s, e, size, mod, mbi->mods_count, j);
 
+            if ( split_gb && end > split_gb * GB(1) )
+                continue;
+
             if ( s < end &&
                  (headroom ||
                   ((end - size) >> PAGE_SHIFT) > mod[j].mod_start) )
@@ -958,6 +971,8 @@ void __init __start_xen(unsigned long mb
     kexec_reserve_area(&boot_e820);
 
     setup_max_pdx();
+    if ( split_gb )
+        xenheap_max_mfn(split_gb << (30 - PAGE_SHIFT));
 
     /*
      * Walk every RAM region and map it in its entirety (on x86/64, at least)
@@ -1129,7 +1144,8 @@ void __init __start_xen(unsigned long mb
         unsigned long limit = virt_to_mfn(HYPERVISOR_VIRT_END - 1);
         uint64_t mask = PAGE_SIZE - 1;
 
-        xenheap_max_mfn(limit);
+        if ( !split_gb )
+            xenheap_max_mfn(limit);
 
         /* Pass the remaining memory to the allocator. */
         for ( i = 0; i < boot_e820.nr_map; i++ )
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -45,6 +45,7 @@
 #include <asm/flushtlb.h>
 #ifdef CONFIG_X86
 #include <asm/p2m.h>
+#include <asm/setup.h> /* for split_gb only */
 #else
 #define p2m_pod_offline_or_broken_hit(pg) 0
 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
@@ -203,6 +204,25 @@ unsigned long __init alloc_boot_pages(
         pg = (r->e - nr_pfns) & ~(pfn_align - 1);
         if ( pg < r->s )
             continue;
+
+#if defined(CONFIG_X86) && !defined(NDEBUG)
+        /*
+         * Filter on pfn_align == 1, since the only allocations using a
+         * bigger alignment are the ones setting up the frame table chunks.
+         * Those allocations get remapped anyway, i.e. them not having
+         * always-accessible 1:1 mappings is not a problem.
+         */
+        if ( split_gb && pfn_align == 1 &&
+             r->e > (split_gb << (30 - PAGE_SHIFT)) )
+        {
+            pg = r->s;
+            if ( pg + nr_pfns > (split_gb << (30 - PAGE_SHIFT)) )
+                continue;
+            r->s = pg + nr_pfns;
+            return pg;
+        }
+#endif
+
         _e = r->e;
         r->e = pg;
         bootmem_region_add(pg + nr_pfns, _e);
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -72,6 +72,7 @@ struct mapcache_domain {
 
 int mapcache_domain_init(struct domain *);
 void mapcache_domain_exit(struct domain *);
+void mapcache_domain_dump(struct domain *);//temp
 int mapcache_vcpu_init(struct vcpu *);
 void mapcache_override_current(struct vcpu *);
 
--- a/xen/include/asm-x86/setup.h
+++ b/xen/include/asm-x86/setup.h
@@ -43,4 +43,10 @@ void microcode_grab_module(
 
 extern uint8_t kbd_shift_flags;
 
+#ifdef NDEBUG
+# define split_gb 0
+#else
+extern unsigned int split_gb;
+#endif
+
 #endif
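
Appendix for reviewers (not part of the patch): a few standalone notes on
the mechanism. The snippets below are illustrative hosted-C sketches with
made-up values, not Xen code.

1) The split_gb << (30 - PAGE_SHIFT) expressions convert the gigabyte count
from the "split-gb=" option into a frame number: dividing a byte address by
the page size is a right shift by PAGE_SHIFT, so GB(n) >> PAGE_SHIFT ==
n << (30 - PAGE_SHIFT).

#include <stdio.h>

#define PAGE_SHIFT 12                          /* 4KiB pages, as on x86 */
#define GB(n)      ((unsigned long)(n) << 30)

int main(void)
{
    unsigned int split_gb = 5;                 /* as if "split-gb=5" */
    unsigned long split_pa = GB(split_gb);     /* byte address of the split */
    /* The same boundary expressed as a frame number. */
    unsigned long split_mfn = (unsigned long)split_gb << (30 - PAGE_SHIFT);

    printf("split at %#lx (mfn %#lx)\n", split_pa, split_mfn);
    return 0;
}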
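
2) The alloc_boot_pages() change: for a region crossing the split, an
unaligned boot allocation is taken bottom-up instead of top-down, and fails
(falling through to the next region) rather than handing out frames above
the limit. A simplified model of that decision:

#include <stdio.h>

struct region { unsigned long s, e; };         /* frame range [s, e) */

/* Returns the first frame of an nr_pfns-sized allocation, 0 on failure. */
static unsigned long boot_alloc(struct region *r, unsigned long nr_pfns,
                                unsigned long limit)
{
    if ( limit && r->e > limit )
    {
        /* Region reaches above the split: allocate bottom-up, and only
         * if the result stays entirely below the limit. */
        unsigned long pg = r->s;

        if ( pg + nr_pfns > limit )
            return 0;
        r->s = pg + nr_pfns;
        return pg;
    }
    /* Default behaviour: carve the allocation off the top. */
    if ( r->e - r->s < nr_pfns )
        return 0;
    return r->e -= nr_pfns;
}

int main(void)
{
    struct region r = { 0x100, 0x200000 };     /* made-up RAM region */
    unsigned long limit = 5UL << (30 - 12);    /* as if "split-gb=5" */

    printf("got frame %#lx\n", boot_alloc(&r, 16, limit));
    return 0;
}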
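
3) The arch_init_memory() change trims how many Xen L4 slots get copied
into guest page tables and, when the split is not 512Gb-aligned, builds one
partial L3 table whose entries above the split are empty. The offset
arithmetic, using an illustrative direct-map base (the real constant
differs between Xen versions):

#include <stdio.h>

#define L3_PAGETABLE_SHIFT 30                  /* one L3 entry maps 1Gb */
#define L4_PAGETABLE_SHIFT 39                  /* one L4 entry maps 512Gb */
#define TABLE_ENTRIES      512

#define l3_table_offset(va) (((va) >> L3_PAGETABLE_SHIFT) & (TABLE_ENTRIES - 1))
#define l4_table_offset(va) (((va) >> L4_PAGETABLE_SHIFT) & (TABLE_ENTRIES - 1))

int main(void)
{
    unsigned long directmap = 0xffff830000000000UL; /* illustrative base */
    unsigned long split_va  = directmap + (5UL << 30); /* "split-gb=5" */

    /* 5Gb is not a multiple of 512Gb, so the boundary falls inside one
     * L4 slot; that slot gets a private L3 with only the entries below
     * the split copied from the idle page tables. */
    printf("split inside L4 slot %lu; L3 entries 0..%lu stay mapped\n",
           l4_table_offset(split_va), l3_table_offset(split_va) - 1);
    return 0;
}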
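
4) The #ifdef NDEBUG guards around the mfn_to_virt() fast paths invert the
usual convention on purpose: the shortcut survives only in release builds,
so debug builds always go through the mapcache, exercising it even for
frames that do have a permanent 1:1 mapping. The shape of the pattern, with
stand-in helpers:

#include <stdio.h>

static void *mapcache_map(unsigned long mfn)   /* stand-in slow path */
{
    printf("mapcache path for mfn %#lx\n", mfn);
    return NULL;
}

static void *map_frame(unsigned long mfn)
{
#ifdef NDEBUG
    /* Release builds keep the shortcut for direct-mapped frames. */
    if ( mfn < 0x100000UL )                    /* made-up direct-map end */
    {
        printf("fast path for mfn %#lx\n", mfn);
        return NULL;
    }
#endif
    /* Debug builds always land here, testing the rarely-used path. */
    return mapcache_map(mfn);
}

int main(void)
{
    map_frame(0x1000);
    return 0;
}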