From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xen.org, Ian.Campbell@citrix.com,
	ian.jackson@eu.citrix.com, stefano.stabellini@eu.citrix.com,
	wei.liu2@citrix.com, roger.pau@citrix.com
Cc: Juergen Gross <jgross@suse.com>
Subject: [PATCH v4 5/9] libxc: use domain builder architecture private data for x86 pv domains
Date: Thu,  5 Nov 2015 15:36:31 +0100	[thread overview]
Message-ID: <1446734195-20257-6-git-send-email-jgross@suse.com> (raw)
In-Reply-To: <1446734195-20257-1-git-send-email-jgross@suse.com>

Move some data private to the x86 domain builder into the newly introduced
architecture private data area. Remove extra_pages, as it is used nowhere.

Signed-off-by: Juergen Gross <jgross@suse.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
---
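Note: for reference, the access pattern adopted throughout the x86 helpers
below is roughly the following (a minimal sketch only: count_pgtable_pages()
is an illustrative helper, not part of this patch, and the zeroed allocation
of dom->arch_private sized by .arch_private_size is assumed to be done by the
generic builder introduced in patch 4/9):

    /* x86 private data now lives behind dom->arch_private instead of
     * being embedded in struct xc_dom_image itself. */
    static unsigned int count_pgtable_pages(struct xc_dom_image *dom)
    {
        struct xc_dom_image_x86 *domx86 = dom->arch_private;

        return domx86->pg_l4 + domx86->pg_l3 +
               domx86->pg_l2 + domx86->pg_l1;
    }
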
 tools/libxc/include/xc_dom.h |  8 --------
 tools/libxc/xc_dom_x86.c     | 48 +++++++++++++++++++++++++++++---------------
 2 files changed, 32 insertions(+), 24 deletions(-)

diff --git a/tools/libxc/include/xc_dom.h b/tools/libxc/include/xc_dom.h
index 09f73cd..0ba9821 100644
--- a/tools/libxc/include/xc_dom.h
+++ b/tools/libxc/include/xc_dom.h
@@ -94,15 +94,7 @@ struct xc_dom_image {
     xen_pfn_t pfn_alloc_end;
     xen_vaddr_t virt_alloc_end;
     xen_vaddr_t bsd_symtab_start;
-
-    /* initial page tables */
-    unsigned int pgtables;
-    unsigned int pg_l4;
-    unsigned int pg_l3;
-    unsigned int pg_l2;
-    unsigned int pg_l1;
     unsigned int alloc_bootstack;
-    unsigned int extra_pages;
     xen_vaddr_t virt_pgtab_end;
 
     /* other state info */
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index ea32b00..aba50df 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -69,6 +69,15 @@
 #define round_down(addr, mask)   ((addr) & ~(mask))
 #define round_up(addr, mask)     ((addr) | (mask))
 
+struct xc_dom_image_x86 {
+    /* initial page tables */
+    unsigned int pgtables;
+    unsigned int pg_l4;
+    unsigned int pg_l3;
+    unsigned int pg_l2;
+    unsigned int pg_l1;
+};
+
 /* get guest IO ABI protocol */
 const char *xc_domain_get_native_protocol(xc_interface *xch,
                                           uint32_t domid)
@@ -132,9 +141,9 @@ static int alloc_pgtables(struct xc_dom_image *dom, int pae,
     int pages, extra_pages;
     xen_vaddr_t try_virt_end;
     xen_pfn_t try_pfn_end;
+    struct xc_dom_image_x86 *domx86 = dom->arch_private;
 
     extra_pages = dom->alloc_bootstack ? 1 : 0;
-    extra_pages += dom->extra_pages;
     extra_pages += 128; /* 512kB padding */
     pages = extra_pages;
     for ( ; ; )
@@ -152,29 +161,30 @@ static int alloc_pgtables(struct xc_dom_image *dom, int pae,
             return -ENOMEM;
         }
 
-        dom->pg_l4 =
+        domx86->pg_l4 =
             nr_page_tables(dom, dom->parms.virt_base, try_virt_end, l4_bits);
-        dom->pg_l3 =
+        domx86->pg_l3 =
             nr_page_tables(dom, dom->parms.virt_base, try_virt_end, l3_bits);
-        dom->pg_l2 =
+        domx86->pg_l2 =
             nr_page_tables(dom, dom->parms.virt_base, try_virt_end, l2_bits);
-        dom->pg_l1 =
+        domx86->pg_l1 =
             nr_page_tables(dom, dom->parms.virt_base, try_virt_end, l1_bits);
         if (pae && try_virt_end < 0xc0000000)
         {
             DOMPRINTF("%s: PAE: extra l2 page table for l3#3",
                       __FUNCTION__);
-            dom->pg_l2++;
+            domx86->pg_l2++;
         }
-        dom->pgtables = dom->pg_l4 + dom->pg_l3 + dom->pg_l2 + dom->pg_l1;
-        pages = dom->pgtables + extra_pages;
+        domx86->pgtables = domx86->pg_l4 + domx86->pg_l3 +
+                           domx86->pg_l2 + domx86->pg_l1;
+        pages = domx86->pgtables + extra_pages;
         if ( dom->virt_alloc_end + pages * PAGE_SIZE_X86 <= try_virt_end + 1 )
             break;
     }
     dom->virt_pgtab_end = try_virt_end + 1;
 
     return xc_dom_alloc_segment(dom, &dom->pgtables_seg, "page tables", 0,
-                                dom->pgtables * PAGE_SIZE_X86);
+                                domx86->pgtables * PAGE_SIZE_X86);
 }
 
 /* ------------------------------------------------------------------------ */
@@ -262,9 +272,10 @@ static xen_pfn_t move_l3_below_4G(struct xc_dom_image *dom,
 
 static int setup_pgtables_x86_32_pae(struct xc_dom_image *dom)
 {
+    struct xc_dom_image_x86 *domx86 = dom->arch_private;
     xen_pfn_t l3pfn = dom->pgtables_seg.pfn;
-    xen_pfn_t l2pfn = l3pfn + dom->pg_l3;
-    xen_pfn_t l1pfn = l2pfn + dom->pg_l2;
+    xen_pfn_t l2pfn = l3pfn + domx86->pg_l3;
+    xen_pfn_t l1pfn = l2pfn + domx86->pg_l2;
     l3_pgentry_64_t *l3tab;
     l2_pgentry_64_t *l2tab = NULL;
     l1_pgentry_64_t *l1tab = NULL;
@@ -373,10 +384,11 @@ static int alloc_pgtables_x86_64(struct xc_dom_image *dom)
 
 static int setup_pgtables_x86_64(struct xc_dom_image *dom)
 {
+    struct xc_dom_image_x86 *domx86 = dom->arch_private;
     xen_pfn_t l4pfn = dom->pgtables_seg.pfn;
-    xen_pfn_t l3pfn = l4pfn + dom->pg_l4;
-    xen_pfn_t l2pfn = l3pfn + dom->pg_l3;
-    xen_pfn_t l1pfn = l2pfn + dom->pg_l2;
+    xen_pfn_t l3pfn = l4pfn + domx86->pg_l4;
+    xen_pfn_t l2pfn = l3pfn + domx86->pg_l3;
+    xen_pfn_t l1pfn = l2pfn + domx86->pg_l2;
     l4_pgentry_64_t *l4tab = xc_dom_pfn_to_ptr(dom, l4pfn, 1);
     l3_pgentry_64_t *l3tab = NULL;
     l2_pgentry_64_t *l2tab = NULL;
@@ -619,6 +631,7 @@ static int alloc_magic_pages_hvm(struct xc_dom_image *dom)
 
 static int start_info_x86_32(struct xc_dom_image *dom)
 {
+    struct xc_dom_image_x86 *domx86 = dom->arch_private;
     start_info_x86_32_t *start_info =
         xc_dom_pfn_to_ptr(dom, dom->start_info_pfn, 1);
     xen_pfn_t shinfo =
@@ -639,7 +652,7 @@ static int start_info_x86_32(struct xc_dom_image *dom)
     start_info->nr_pages = dom->total_pages;
     start_info->shared_info = shinfo << PAGE_SHIFT_X86;
     start_info->pt_base = dom->pgtables_seg.vstart;
-    start_info->nr_pt_frames = dom->pgtables;
+    start_info->nr_pt_frames = domx86->pgtables;
     start_info->mfn_list = dom->p2m_seg.vstart;
 
     start_info->flags = dom->flags;
@@ -665,6 +678,7 @@ static int start_info_x86_32(struct xc_dom_image *dom)
 
 static int start_info_x86_64(struct xc_dom_image *dom)
 {
+    struct xc_dom_image_x86 *domx86 = dom->arch_private;
     start_info_x86_64_t *start_info =
         xc_dom_pfn_to_ptr(dom, dom->start_info_pfn, 1);
     xen_pfn_t shinfo =
@@ -685,7 +699,7 @@ static int start_info_x86_64(struct xc_dom_image *dom)
     start_info->nr_pages = dom->total_pages;
     start_info->shared_info = shinfo << PAGE_SHIFT_X86;
     start_info->pt_base = dom->pgtables_seg.vstart;
-    start_info->nr_pt_frames = dom->pgtables;
+    start_info->nr_pt_frames = domx86->pgtables;
     start_info->mfn_list = dom->p2m_seg.vstart;
 
     start_info->flags = dom->flags;
@@ -1650,6 +1664,7 @@ static struct xc_dom_arch xc_dom_32_pae = {
     .native_protocol = XEN_IO_PROTO_ABI_X86_32,
     .page_shift = PAGE_SHIFT_X86,
     .sizeof_pfn = 4,
+    .arch_private_size = sizeof(struct xc_dom_image_x86),
     .alloc_magic_pages = alloc_magic_pages,
     .alloc_pgtables = alloc_pgtables_x86_32_pae,
     .setup_pgtables = setup_pgtables_x86_32_pae,
@@ -1666,6 +1681,7 @@ static struct xc_dom_arch xc_dom_64 = {
     .native_protocol = XEN_IO_PROTO_ABI_X86_64,
     .page_shift = PAGE_SHIFT_X86,
     .sizeof_pfn = 8,
+    .arch_private_size = sizeof(struct xc_dom_image_x86),
     .alloc_magic_pages = alloc_magic_pages,
     .alloc_pgtables = alloc_pgtables_x86_64,
     .setup_pgtables = setup_pgtables_x86_64,
-- 
2.1.4


Thread overview: 25+ messages
2015-11-05 14:36 [PATCH v4 0/9] libxc: support building large pv-domains Juergen Gross
2015-11-05 14:36 ` [PATCH v4 1/9] libxc: reorganize domain builder guest memory allocator Juergen Gross
2015-11-12 11:14   ` Wei Liu
2015-11-12 11:20     ` Ian Campbell
2015-11-12 11:22       ` Wei Liu
2015-11-12 11:29       ` Juergen Gross
2015-11-12 11:32         ` Wei Liu
2015-11-12 12:28           ` Juergen Gross
2015-11-12 11:21     ` Juergen Gross
2015-11-12 11:24       ` Wei Liu
2015-11-05 14:36 ` [PATCH v4 2/9] xen: add generic flag to elf_dom_parms indicating support of unmapped initrd Juergen Gross
2015-11-05 17:38   ` Andrew Cooper
2015-11-05 14:36 ` [PATCH v4 3/9] libxc: rename domain builder count_pgtables to alloc_pgtables Juergen Gross
2015-11-05 14:36 ` [PATCH v4 4/9] libxc: introduce domain builder architecture specific data Juergen Gross
2015-11-05 14:36 ` Juergen Gross [this message]
2015-11-05 14:36 ` [PATCH v4 6/9] libxc: create unmapped initrd in domain builder if supported Juergen Gross
2015-11-05 14:36 ` [PATCH v4 7/9] libxc: split p2m allocation in domain builder from other magic pages Juergen Gross
2015-11-05 14:36 ` [PATCH v4 8/9] libxc: rework of domain builder's page table handler Juergen Gross
2015-11-12 12:39   ` Wei Liu
2015-11-12 12:45     ` Juergen Gross
2015-11-05 14:36 ` [PATCH v4 9/9] libxc: create p2m list outside of kernel mapping if supported Juergen Gross
2015-11-12 12:42   ` Wei Liu
2015-11-12  5:09 ` [PATCH v4 0/9] libxc: support building large pv-domains Juergen Gross
2015-11-12  9:39   ` Wei Liu
2015-11-12  9:41     ` Juergen Gross
