All of lore.kernel.org
 help / color / mirror / Atom feed
* [RFC][PATCH] Create domains with superpages
@ 2009-03-26 15:20 Dave McCracken
  0 siblings, 0 replies; only message in thread
From: Dave McCracken @ 2009-03-26 15:20 UTC (permalink / raw)
  To: Xen Developers List

[-- Attachment #1: Type: text/plain, Size: 399 bytes --]


This patch modifies xend to always create and restore domains with 2M pages.  It 
currently works unconditionally, i.e., once the library is installed, all domains 
will be created with 2M pages.

The patch is intended only to show feasibility and for testing.  I am working 
on adding an option in the domain config file to enable this functionality on 
a per-domain basis.

Dave McCracken
Oracle Corp.

[-- Attachment #2: xen-hpage-090312.patch --]
[-- Type: text/x-diff, Size: 11784 bytes --]

--- xen-unstable/./tools/libxc/xc_dom_x86.c	2009-01-07 08:49:54.000000000 -0600
+++ xen-hpage/./tools/libxc/xc_dom_x86.c	2009-02-25 13:07:08.000000000 -0600
@@ -26,6 +26,9 @@
 
 /* ------------------------------------------------------------------------ */
 
+#define SUPERPAGE_PFN_SHIFT  9
+#define SUPERPAGE_NR_PFNS    (1UL << SUPERPAGE_PFN_SHIFT)
+
 #define bits_to_mask(bits)       (((xen_vaddr_t)1 << (bits))-1)
 #define round_down(addr, mask)   ((addr) & ~(mask))
 #define round_up(addr, mask)     ((addr) | (mask))
@@ -695,6 +698,7 @@ int arch_setup_meminit(struct xc_dom_ima
 {
     int rc;
     xen_pfn_t pfn;
+    int superpages = 1;
 
     rc = x86_compat(dom->guest_xc, dom->guest_domid, dom->guest_type);
     if ( rc )
@@ -707,15 +711,44 @@ int arch_setup_meminit(struct xc_dom_ima
             return rc;
     }
 
-    /* setup initial p2m */
     dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) * dom->total_pages);
-    for ( pfn = 0; pfn < dom->total_pages; pfn++ )
-        dom->p2m_host[pfn] = pfn;
+    if (superpages)
+    {
+        int count = dom->total_pages >> SUPERPAGE_PFN_SHIFT;
+        xen_pfn_t extents[count];
 
-    /* allocate guest memory */
-    rc = xc_domain_memory_populate_physmap(dom->guest_xc, dom->guest_domid,
-                                           dom->total_pages, 0, 0,
-                                           dom->p2m_host);
+        for (pfn = 0; pfn < count; pfn++)
+            extents[pfn] = pfn << SUPERPAGE_PFN_SHIFT;
+        rc = xc_domain_memory_populate_physmap(dom->guest_xc, dom->guest_domid,
+                                               count, SUPERPAGE_PFN_SHIFT, 0,
+                                               extents);
+        if (!rc)
+        {
+            int i, j;
+            xen_pfn_t mfn;
+
+            /* Expand the returned mfn into the p2m array */
+            pfn = 0;
+            for (i = 0; i < count; i++)
+            {
+                mfn = extents[i];
+                for (j = 0; j < SUPERPAGE_NR_PFNS; j++, pfn++)
+                {
+                    dom->p2m_host[pfn] = mfn + j;
+                }
+            }
+        }
+    } else
+    {
+        /* setup initial p2m */
+        for ( pfn = 0; pfn < dom->total_pages; pfn++ )
+            dom->p2m_host[pfn] = pfn;
+
+        /* allocate guest memory */
+        rc = xc_domain_memory_populate_physmap(dom->guest_xc, dom->guest_domid,
+                                               dom->total_pages, 0, 0,
+                                               dom->p2m_host);
+    }
     return rc;
 }
 
--- xen-unstable/./tools/libxc/xc_domain_restore.c	2009-01-07 08:49:54.000000000 -0600
+++ xen-hpage/./tools/libxc/xc_domain_restore.c	2009-03-09 12:27:24.000000000 -0500
@@ -53,13 +53,95 @@ static xen_pfn_t *live_p2m = NULL;
 /* A table mapping each PFN to its new MFN. */
 static xen_pfn_t *p2m = NULL;
 
-/* A table of P2M mappings in the current region */
-static xen_pfn_t *p2m_batch = NULL;
-
 /* Address size of the guest, in bytes */
 unsigned int guest_width;
 
 /*
+**
+**
+*/
+#define SUPERPAGE_PFN_SHIFT  9
+#define SUPERPAGE_NR_PFNS    (1UL << SUPERPAGE_PFN_SHIFT)
+
+static int allocate_mfn(int xc_handle, uint32_t dom, unsigned long pfn)
+{
+    unsigned long mfn;
+    int superpages = 1;
+
+    if (superpages)
+    {
+        unsigned long base_pfn;
+
+        mfn = pfn;
+        base_pfn = mfn & ~(SUPERPAGE_NR_PFNS-1);
+
+        if (xc_domain_memory_populate_physmap(xc_handle, dom, 1,
+                                              SUPERPAGE_PFN_SHIFT, 0, &mfn) != 0)
+        {
+            ERROR("Failed to allocate physical memory.!\n"); 
+            errno = ENOMEM;
+            return 1;
+        }
+        for (pfn = base_pfn; pfn < base_pfn + SUPERPAGE_NR_PFNS; pfn++, mfn++)
+        {
+            p2m[pfn] = mfn;
+        }
+    }
+    else
+    {
+        mfn = pfn;
+        if (xc_domain_memory_populate_physmap(xc_handle, dom, 1, 0,
+                                              0, &mfn) != 0)
+        {
+            ERROR("Failed to allocate physical memory.!\n"); 
+            errno = ENOMEM;
+            return 1;
+        }
+        p2m[pfn] = mfn;
+    }
+    return 0;
+}
+
+static int allocate_physmem(int xc_handle, uint32_t dom,
+                            unsigned long *region_pfn_type, int region_size,
+                            unsigned int hvm, xen_pfn_t *region_mfn)
+{
+	int i;
+    unsigned long pfn;
+    unsigned long pagetype;
+
+    for (i = 0; i < region_size; i++)
+    {
+        pfn      = region_pfn_type[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
+        pagetype = region_pfn_type[i] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
+
+        if ( pfn > p2m_size )
+        {
+            ERROR("pfn out of range");
+            return 1;
+        }
+        if (pagetype == XEN_DOMCTL_PFINFO_XTAB)
+        {
+            region_mfn[i] = ~0UL;
+        }
+        else 
+        {
+            if (p2m[pfn] == INVALID_P2M_ENTRY)
+            {
+                if (allocate_mfn(xc_handle, dom, pfn) != 0)
+                    return 1;
+            }
+
+            /* setup region_mfn[] for batch map.
+             * For HVM guests, this interface takes PFNs, not MFNs */
+            region_mfn[i] = hvm ? pfn : p2m[pfn]; 
+        }
+    }
+    return 0;
+}
+
+
+/*
 ** In the state file (or during transfer), all page-table pages are
 ** converted into a 'canonical' form where references to actual mfns
 ** are replaced with references to the corresponding pfns.
@@ -72,69 +154,28 @@ static int uncanonicalize_pagetable(int 
     int i, pte_last;
     unsigned long pfn;
     uint64_t pte;
-    int nr_mfns = 0; 
 
     pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);
 
-    /* First pass: work out how many (if any) MFNs we need to alloc */
     for ( i = 0; i < pte_last; i++ )
     {
         if ( pt_levels == 2 )
             pte = ((uint32_t *)page)[i];
         else
             pte = ((uint64_t *)page)[i];
-
+        
         /* XXX SMH: below needs fixing for PROT_NONE etc */
         if ( !(pte & _PAGE_PRESENT) )
             continue;
         
         pfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86;
-        
-        if ( pfn >= p2m_size )
-        {
-            /* This "page table page" is probably not one; bail. */
-            ERROR("Frame number in type %lu page table is out of range: "
-                  "i=%d pfn=0x%lx p2m_size=%lu",
-                  type >> 28, i, pfn, p2m_size);
-            return 0;
-        }
-        
+
+        /* Allocate mfn if necessary */
         if ( p2m[pfn] == INVALID_P2M_ENTRY )
         {
-            /* Have a 'valid' PFN without a matching MFN - need to alloc */
-            p2m_batch[nr_mfns++] = pfn; 
-            p2m[pfn]--;
+            if (allocate_mfn(xc_handle, dom, pfn) != 0)
+                return 0;
         }
-    }
-
-    /* Allocate the requisite number of mfns. */
-    if ( nr_mfns &&
-         (xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0, 0,
-                                            p2m_batch) != 0) )
-    { 
-        ERROR("Failed to allocate memory for batch.!\n"); 
-        errno = ENOMEM;
-        return 0; 
-    }
-    
-    /* Second pass: uncanonicalize each present PTE */
-    nr_mfns = 0;
-    for ( i = 0; i < pte_last; i++ )
-    {
-        if ( pt_levels == 2 )
-            pte = ((uint32_t *)page)[i];
-        else
-            pte = ((uint64_t *)page)[i];
-        
-        /* XXX SMH: below needs fixing for PROT_NONE etc */
-        if ( !(pte & _PAGE_PRESENT) )
-            continue;
-        
-        pfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86;
-
-        if ( p2m[pfn] == (INVALID_P2M_ENTRY-1) )
-            p2m[pfn] = p2m_batch[nr_mfns++];
-
         pte &= ~MADDR_MASK_X86;
         pte |= (uint64_t)p2m[pfn] << PAGE_SHIFT;
 
@@ -377,11 +418,9 @@ int xc_domain_restore(int xc_handle, int
 
     region_mfn = xg_memalign(PAGE_SIZE, ROUNDUP(
                               MAX_BATCH_SIZE * sizeof(xen_pfn_t), PAGE_SHIFT));
-    p2m_batch  = xg_memalign(PAGE_SIZE, ROUNDUP(
-                              MAX_BATCH_SIZE * sizeof(xen_pfn_t), PAGE_SHIFT));
 
     if ( (p2m == NULL) || (pfn_type == NULL) ||
-         (region_mfn == NULL) || (p2m_batch == NULL) )
+         (region_mfn == NULL) )
     {
         ERROR("memory alloc failed");
         errno = ENOMEM;
@@ -390,8 +429,6 @@ int xc_domain_restore(int xc_handle, int
 
     memset(region_mfn, 0,
            ROUNDUP(MAX_BATCH_SIZE * sizeof(xen_pfn_t), PAGE_SHIFT)); 
-    memset(p2m_batch, 0,
-           ROUNDUP(MAX_BATCH_SIZE * sizeof(xen_pfn_t), PAGE_SHIFT)); 
 
     if ( lock_pages(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
     {
@@ -399,12 +436,6 @@ int xc_domain_restore(int xc_handle, int
         goto out;
     }
 
-    if ( lock_pages(p2m_batch, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
-    {
-        ERROR("Could not lock p2m_batch");
-        goto out;
-    }
-
     /* Get the domain's shared-info frame. */
     domctl.cmd = XEN_DOMCTL_getdomaininfo;
     domctl.domain = (domid_t)dom;
@@ -437,7 +468,7 @@ int xc_domain_restore(int xc_handle, int
     n = m = 0;
     for ( ; ; )
     {
-        int j, nr_mfns = 0; 
+        int j; 
 
         this_pc = (n * 100) / p2m_size;
         if ( (this_pc - prev_pc) >= 5 )
@@ -521,57 +552,9 @@ int xc_domain_restore(int xc_handle, int
             goto out;
         }
 
-        /* First pass for this batch: work out how much memory to alloc */
-        nr_mfns = 0; 
-        for ( i = 0; i < j; i++ )
-        {
-            unsigned long pfn, pagetype;
-            pfn      = region_pfn_type[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
-            pagetype = region_pfn_type[i] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
-
-            if ( (pagetype != XEN_DOMCTL_PFINFO_XTAB) && 
-                 (p2m[pfn] == INVALID_P2M_ENTRY) )
-            {
-                /* Have a live PFN which hasn't had an MFN allocated */
-                p2m_batch[nr_mfns++] = pfn; 
-                p2m[pfn]--;
-            }
-        } 
-
-        /* Now allocate a bunch of mfns for this batch */
-        if ( nr_mfns &&
-             (xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0,
-                                                0, p2m_batch) != 0) )
-        { 
-            ERROR("Failed to allocate memory for batch.!\n"); 
-            errno = ENOMEM;
+        if (allocate_physmem(xc_handle, dom, region_pfn_type,
+                             j, hvm, region_mfn) != 0)
             goto out;
-        }
-
-        /* Second pass for this batch: update p2m[] and region_mfn[] */
-        nr_mfns = 0; 
-        for ( i = 0; i < j; i++ )
-        {
-            unsigned long pfn, pagetype;
-            pfn      = region_pfn_type[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
-            pagetype = region_pfn_type[i] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
-
-            if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
-                region_mfn[i] = ~0UL; /* map will fail but we don't care */
-            else 
-            {
-                if ( p2m[pfn] == (INVALID_P2M_ENTRY-1) )
-                {
-                    /* We just allocated a new mfn above; update p2m */
-                    p2m[pfn] = p2m_batch[nr_mfns++]; 
-                    nr_pfns++; 
-                }
-
-                /* setup region_mfn[] for batch map.
-                 * For HVM guests, this interface takes PFNs, not MFNs */
-                region_mfn[i] = hvm ? pfn : p2m[pfn]; 
-            }
-        } 
 
         /* Map relevant mfns */
         region_base = xc_map_foreign_batch(
@@ -1223,3 +1206,12 @@ int xc_domain_restore(int xc_handle, int
     
     return rc;
 }
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2009-03-26 15:20 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-03-26 15:20 [RFC][PATCH] Create domains with superpages Dave McCracken

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.