From: Roger Pau Monne <roger.pau@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Jan Beulich <jbeulich@suse.com>,
Roger Pau Monne <roger.pau@citrix.com>
Subject: [PATCH RFC 06/12] xen/x86: populate PVHv2 Dom0 physical memory map
Date: Fri, 29 Jul 2016 18:29:01 +0200 [thread overview]
Message-ID: <1469809747-11176-7-git-send-email-roger.pau@citrix.com> (raw)
In-Reply-To: <1469809747-11176-1-git-send-email-roger.pau@citrix.com>
Craft the Dom0 e820 memory map and populate it.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
xen/arch/x86/domain_build.c | 199 ++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 193 insertions(+), 6 deletions(-)
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index c0ef40f..cb8ecbd 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -43,6 +43,11 @@ static long __initdata dom0_nrpages;
static long __initdata dom0_min_nrpages;
static long __initdata dom0_max_nrpages = LONG_MAX;
+/* GFN of the identity map for EPT. */
+#define HVM_IDENT_PT_GFN 0xfeffeu
+
+static unsigned int __initdata hvm_mem_stats[MAX_ORDER + 1];
+
/*
* dom0_mem=[min:<min_amt>,][max:<max_amt>,][<amt>]
*
@@ -304,7 +309,8 @@ static unsigned long __init compute_dom0_nr_pages(
avail -= max_pdx >> s;
}
- need_paging = opt_dom0_shadow || (is_pvh_domain(d) && !iommu_hap_pt_share);
+ need_paging = opt_dom0_shadow || (has_hvm_container_domain(d) &&
+ (!iommu_hap_pt_share || !paging_mode_hap(d)));
for ( ; ; need_paging = 0 )
{
nr_pages = dom0_nrpages;
@@ -336,7 +342,8 @@ static unsigned long __init compute_dom0_nr_pages(
avail -= dom0_paging_pages(d, nr_pages);
}
- if ( (parms->p2m_base == UNSET_ADDR) && (dom0_nrpages <= 0) &&
+ if ( is_pv_domain(d) &&
+ (parms->p2m_base == UNSET_ADDR) && (dom0_nrpages <= 0) &&
((dom0_min_nrpages <= 0) || (nr_pages > min_pages)) )
{
/*
@@ -547,11 +554,12 @@ static __init void pvh_map_all_iomem(struct domain *d, unsigned long nr_pages)
ASSERT(nr_holes == 0);
}
-static __init void pvh_setup_e820(struct domain *d, unsigned long nr_pages)
+static __init void hvm_setup_e820(struct domain *d, unsigned long nr_pages)
{
struct e820entry *entry, *entry_guest;
unsigned int i;
unsigned long pages, cur_pages = 0;
+ uint64_t start, end;
/*
* Craft the e820 memory map for Dom0 based on the hardware e820 map.
@@ -579,8 +587,19 @@ static __init void pvh_setup_e820(struct domain *d, unsigned long nr_pages)
continue;
}
- *entry_guest = *entry;
- pages = PFN_UP(entry_guest->size);
+ /*
+ * Make sure the start and length are aligned to PAGE_SIZE, because
+ * that's the minimum granularity of the 2nd stage translation.
+ */
+ start = ROUNDUP(entry->addr, PAGE_SIZE);
+ end = (entry->addr + entry->size) & PAGE_MASK;
+ if ( start >= end )
+ continue;
+
+ entry_guest->type = E820_RAM;
+ entry_guest->addr = start;
+ entry_guest->size = end - start;
+ pages = PFN_DOWN(entry_guest->size);
if ( (cur_pages + pages) > nr_pages )
{
/* Truncate region */
@@ -591,6 +610,8 @@ static __init void pvh_setup_e820(struct domain *d, unsigned long nr_pages)
{
cur_pages += pages;
}
+ ASSERT((entry_guest->addr & ~PAGE_MASK) == 0 &&
+ (entry_guest->size & ~PAGE_MASK) == 0);
next:
d->arch.nr_e820++;
entry_guest++;
@@ -1631,7 +1652,7 @@ static int __init construct_dom0_pv(
dom0_update_physmap(d, pfn, mfn, 0);
pvh_map_all_iomem(d, nr_pages);
- pvh_setup_e820(d, nr_pages);
+ hvm_setup_e820(d, nr_pages);
}
if ( d->domain_id == hardware_domid )
@@ -1647,15 +1668,181 @@ out:
return rc;
}
+/* Helper to convert from bytes into human-readable form. */
+static void __init pretty_print_bytes(uint64_t size)
+{
+    static const char *const units[] = {"B", "KB", "MB", "GB", "TB"};
+    unsigned int i = 0;
+
+    /*
+     * Bound the index by the element count (ARRAY_SIZE), not by
+     * sizeof(units): the latter is the size in bytes of the pointer
+     * array and would let units[] be indexed out of bounds for sizes
+     * of 1024 TB and above.
+     */
+    while ( ++i < ARRAY_SIZE(units) && size >= 1024 )
+        size >>= 10; /* size /= 1024 */
+
+    printk("%4" PRIu64 "%2s", size, units[i - 1]);
+}
+
+/* Calculate the biggest usable order given a size in bytes. */
+static inline unsigned int get_order(uint64_t size)
+{
+    unsigned int order;
+    uint64_t pg;
+
+    /* Callers must pass a page-aligned size. */
+    ASSERT((size & ~PAGE_MASK) == 0);
+    pg = PFN_DOWN(size);
+    /*
+     * NB: the shifted constant must be 64-bit: with a plain int "1"
+     * the shift is undefined behavior once order reaches 31, which is
+     * reachable for page counts >= 2^31 (size is a uint64_t).
+     */
+    for ( order = 0; pg >= ((uint64_t)1 << (order + 1)); order++ )
+        ;
+
+    return order;
+}
+
+/* Populate an HVM memory range using the biggest possible order. */
+static void __init hvm_populate_memory_range(struct domain *d, uint64_t start,
+                                             uint64_t size)
+{
+    /*
+     * NB: static on purpose - once the exact-node allocation has failed
+     * at order 0 the flags are dropped for the remainder of the Dom0
+     * build, since further exact-node attempts would fail the same way.
+     */
+    static unsigned int __initdata memflags = MEMF_no_dma|MEMF_exact_node;
+    unsigned int order;
+    uint64_t order_size;
+    struct page_info *page;
+    int rc;
+
+    /* Both the start address and the size must be page-aligned. */
+    ASSERT((size & ~PAGE_MASK) == 0 && (start & ~PAGE_MASK) == 0);
+
+    order = MAX_ORDER;
+    while ( size != 0 )
+    {
+        /* Orders can only shrink as the remaining size decreases. */
+        order = min(get_order(size), order);
+        page = alloc_domheap_pages(d, order, memflags);
+        if ( page == NULL )
+        {
+            if ( order == 0 && memflags )
+            {
+                /* Try again without any memflags. */
+                memflags = 0;
+                order = MAX_ORDER;
+                continue;
+            }
+            if ( order == 0 )
+                panic("Unable to allocate memory with order 0!\n");
+            /* Retry with the next smaller order. */
+            order--;
+            continue;
+        }
+
+        hvm_mem_stats[order]++;
+        /* Size in bytes of the chunk just allocated. */
+        order_size = (uint64_t)1 << (order + PAGE_SHIFT);
+        rc = guest_physmap_add_page(d, _gfn(PFN_DOWN(start)),
+                                    _mfn(page_to_mfn(page)), order);
+        if ( rc != 0 )
+            panic("Failed to populate memory: [%" PRIx64 " - %" PRIx64 "] %d\n",
+                  start, start + order_size, rc);
+        start += order_size;
+        size -= order_size;
+        /*
+         * Check for pending softirqs whenever the remaining size crosses
+         * a 4GB boundary (and once more when it reaches 0).
+         */
+        if ( (size & 0xffffffff) == 0 )
+            process_pending_softirqs();
+    }
+}
+
+/*
+ * Craft and populate the physical memory map of a PVHv2 Dom0: build the
+ * guest e820, allocate and map all RAM regions, and - when unrestricted
+ * guest mode is unavailable - set up the EPT identity page table needed
+ * to run with CR0.PG=0. Returns 0 on success, -errno on failure.
+ */
+static int __init hvm_setup_p2m(struct domain *d)
+{
+ struct vcpu *v = d->vcpu[0];
+ unsigned long nr_pages;
+ int i;
+
+ printk("** Preparing memory map **\n");
+
+ /*
+ * Subtract one page for the EPT identity page table and two pages
+ * for the MADT replacement.
+ */
+ nr_pages = compute_dom0_nr_pages(d, NULL, 0) - 3;
+
+ hvm_setup_e820(d, nr_pages);
+ /*
+ * NOTE(review): the return value of paging_set_allocation() is
+ * ignored here - a failure to size the paging pool would go
+ * unnoticed; consider checking it.
+ */
+ paging_set_allocation(d, dom0_paging_pages(d, nr_pages));
+
+ printk("Dom0 memory map:\n");
+ print_e820_memory_map(d->arch.e820, d->arch.nr_e820);
+
+ printk("** Populating memory map **\n");
+ /* Populate memory map. */
+ for ( i = 0; i < d->arch.nr_e820; i++ )
+ {
+ if ( d->arch.e820[i].type != E820_RAM )
+ continue;
+
+ hvm_populate_memory_range(d, d->arch.e820[i].addr,
+ d->arch.e820[i].size);
+ }
+
+ /* Dump the per-order counters gathered by hvm_populate_memory_range(). */
+ printk("Memory allocation stats:\n");
+ for ( i = 0; i <= MAX_ORDER; i++ )
+ {
+ if ( hvm_mem_stats[MAX_ORDER - i] != 0 )
+ {
+ printk("Order %2u: ", MAX_ORDER - i);
+ pretty_print_bytes(((uint64_t)1 << (MAX_ORDER - i + PAGE_SHIFT)) *
+ hvm_mem_stats[MAX_ORDER - i]);
+ printk("\n");
+ }
+ }
+
+ if ( cpu_has_vmx && paging_mode_hap(d) && !vmx_unrestricted_guest(v) )
+ {
+ struct vcpu *saved_current;
+ struct page_info *page;
+ uint32_t *ident_pt;
+
+ /*
+ * Identity-map page table is required for running with CR0.PG=0
+ * when using Intel EPT. Create a 32-bit non-PAE page directory of
+ * superpages.
+ */
+ page = alloc_domheap_pages(d, 0, 0);
+ if ( unlikely(!page) )
+ {
+ printk("Unable to allocate page for identity map\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * NOTE(review): presumably current must temporarily point at Dom0's
+ * vcpu so __map_domain_page() operates in the right context at this
+ * early boot stage - confirm, and that it is restored on all paths.
+ */
+ saved_current = current;
+ set_current(v);
+
+ /* Fill the directory with 4MB superpage entries (i << 22 == i * 4MB). */
+ ident_pt = __map_domain_page(page);
+ for ( i = 0; i < PAGE_SIZE / sizeof(*ident_pt); i++ )
+ ident_pt[i] = ((i << 22) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
+ _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
+ unmap_domain_page(ident_pt);
+
+ /*
+ * NOTE(review): the return value of guest_physmap_add_page() is
+ * ignored - a mapping failure would leave HVM_PARAM_IDENT_PT
+ * pointing at an unmapped GFN; consider checking it.
+ */
+ guest_physmap_add_page(d, _gfn(HVM_IDENT_PT_GFN),
+ _mfn(page_to_mfn(page)), 0);
+ /* Advertise the identity PT location via the HVM param. */
+ d->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] =
+ HVM_IDENT_PT_GFN << PAGE_SHIFT;
+ set_current(saved_current);
+ }
+
+ return 0;
+}
+
static int __init construct_dom0_hvm(struct domain *d, const module_t *image,
unsigned long image_headroom,
module_t *initrd,
void *(*bootstrap_map)(const module_t *),
char *cmdline)
{
+ int rc;
printk("** Building a PVH Dom0 **\n");
+ /* Sanity! */
+ BUG_ON(d->domain_id != 0);
+ BUG_ON(d->vcpu[0] == NULL);
+
+ /* Clear any softirqs pending from earlier boot work before starting. */
+ process_pending_softirqs();
+
+ iommu_hwdom_init(d);
+
+ /* Build and populate the Dom0 physical memory map. */
+ rc = hvm_setup_p2m(d);
+ if ( rc )
+ {
+ printk("Failed to setup Dom0 physical memory map\n");
+ return rc;
+ }
+
return 0;
}
--
2.7.4 (Apple Git-66)
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
next prev parent reply other threads:[~2016-07-29 16:29 UTC|newest]
Thread overview: 49+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-07-29 16:28 [PATCH RFC 01/12] PVHv2 Dom0 Roger Pau Monne
2016-07-29 16:28 ` [PATCH RFC 01/12] x86/paging: introduce paging_set_allocation Roger Pau Monne
2016-07-29 16:47 ` Andrew Cooper
2016-08-02 9:47 ` Roger Pau Monne
2016-08-02 15:49 ` Roger Pau Monne
2016-08-02 16:12 ` Jan Beulich
2016-08-03 15:11 ` George Dunlap
2016-08-03 15:25 ` Jan Beulich
2016-08-03 15:28 ` George Dunlap
2016-08-03 15:37 ` Jan Beulich
2016-08-03 15:59 ` George Dunlap
2016-08-03 16:00 ` Roger Pau Monne
2016-08-03 16:15 ` Jan Beulich
2016-08-03 16:24 ` Roger Pau Monne
2016-08-04 6:19 ` Jan Beulich
2016-08-01 8:57 ` Tim Deegan
2016-07-29 16:28 ` [PATCH RFC 02/12] xen/x86: split the setup of Dom0 permissions to a function Roger Pau Monne
2016-07-29 16:28 ` [PATCH RFC 03/12] xen/x86: allow the emulated APICs to be enbled for the hardware domain Roger Pau Monne
2016-07-29 17:50 ` Andrew Cooper
2016-08-01 11:23 ` Roger Pau Monne
2016-07-29 16:28 ` [PATCH RFC 04/12] xen/x86: split Dom0 build into PV and PVHv2 Roger Pau Monne
2016-07-29 17:57 ` Andrew Cooper
2016-08-01 11:36 ` Roger Pau Monne
2016-08-04 18:28 ` Andrew Cooper
2016-07-29 16:29 ` [PATCH RFC 05/12] xen/x86: make print_e820_memory_map global Roger Pau Monne
2016-07-29 17:57 ` Andrew Cooper
2016-07-29 16:29 ` Roger Pau Monne [this message]
2016-07-29 19:04 ` [PATCH RFC 06/12] xen/x86: populate PVHv2 Dom0 physical memory map Andrew Cooper
2016-08-02 9:19 ` Roger Pau Monne
2016-08-04 18:43 ` Andrew Cooper
2016-08-05 9:40 ` Roger Pau Monne
2016-08-11 18:28 ` Andrew Cooper
2016-07-29 16:29 ` [PATCH RFC 07/12] xen/x86: parse Dom0 kernel for PVHv2 Roger Pau Monne
2016-09-26 16:16 ` Jan Beulich
2016-09-26 17:11 ` Roger Pau Monne
2016-07-29 16:29 ` [PATCH RFC 08/12] xen/x86: setup PVHv2 Dom0 CPUs Roger Pau Monne
2016-09-26 16:19 ` Jan Beulich
2016-09-26 17:05 ` Roger Pau Monne
2016-09-27 8:10 ` Jan Beulich
2016-07-29 16:29 ` [PATCH RFC 09/12] xen/x86: setup PVHv2 Dom0 ACPI tables Roger Pau Monne
2016-09-26 16:21 ` Jan Beulich
2016-07-29 16:29 ` [PATCH RFC 10/12] xen/dcpi: add a dpci passthrough handler for hardware domain Roger Pau Monne
2016-07-29 16:29 ` [PATCH RFC 11/12] xen/x86: allow a PVHv2 Dom0 to register PCI devices with Xen Roger Pau Monne
2016-07-29 16:29 ` [PATCH RFC 12/12] xen/x86: route legacy PCI interrupts to Dom0 Roger Pau Monne
2016-07-29 16:38 ` [PATCH RFC 01/12] PVHv2 Dom0 Roger Pau Monne
2016-09-26 16:25 ` Jan Beulich
2016-09-26 17:12 ` Roger Pau Monne
2016-09-26 17:55 ` Konrad Rzeszutek Wilk
2016-09-27 8:11 ` Jan Beulich
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1469809747-11176-7-git-send-email-roger.pau@citrix.com \
--to=roger.pau@citrix.com \
--cc=andrew.cooper3@citrix.com \
--cc=jbeulich@suse.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).