From: "Jan Beulich" <JBeulich@suse.com>
To: xen-devel <xen-devel@lists.xen.org>
Subject: [PATCH 05/11] x86: consolidate initialization of PV guest L4 page tables
Date: Tue, 22 Jan 2013 10:52:20 +0000
Message-ID: <50FE7D7402000078000B8323@nat28.tlf.novell.com>
In-Reply-To: <50FE7BF502000078000B82F8@nat28.tlf.novell.com>

So far this code has been repeated in 3 places, requiring all of them
to be remembered and updated whenever a change is made.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
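
For illustration (a minimal sketch, not part of the patch itself): with
the new helper, callers that build a PV guest top-level table from
scratch reduce to the same two steps, mirroring the setup_compat_l4()
hunk below:

    l4_pgentry_t *l4tab = page_to_virt(pg); /* pg: freshly allocated page */

    clear_page(l4tab);                      /* all guest slots start empty */
    init_guest_l4_table(l4tab, v->domain);  /* fill in the Xen-owned slots */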

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -290,13 +290,8 @@ static int setup_compat_l4(struct vcpu *
     pg->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;
 
     l4tab = page_to_virt(pg);
-    copy_page(l4tab, idle_pg_table);
-    l4tab[0] = l4e_empty();
-    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        l4e_from_page(pg, __PAGE_HYPERVISOR);
-    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        l4e_from_paddr(__pa(v->domain->arch.mm_perdomain_l3),
-                       __PAGE_HYPERVISOR);
+    clear_page(l4tab);
+    init_guest_l4_table(l4tab, v->domain);
 
     v->arch.guest_table = pagetable_from_page(pg);
     v->arch.guest_table_user = v->arch.guest_table;
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -622,13 +622,7 @@ int __init construct_dom0(
         l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
     }
     clear_page(l4tab);
-    for ( i = l4_table_offset(HYPERVISOR_VIRT_START);
-          i < l4_table_offset(HYPERVISOR_VIRT_END); ++i )
-        l4tab[i] = idle_pg_table[i];
-    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        l4e_from_paddr(__pa(l4start), __PAGE_HYPERVISOR);
-    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
+    init_guest_l4_table(l4tab, d);
     v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
     if ( is_pv_32on64_domain(d) )
         v->arch.guest_table_user = v->arch.guest_table;
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1315,6 +1315,18 @@ static int alloc_l3_table(struct page_in
     return rc > 0 ? 0 : rc;
 }
 
+void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d)
+{
+    /* Xen private mappings. */
+    memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
+    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
+        l4e_from_pfn(virt_to_mfn(l4tab), __PAGE_HYPERVISOR);
+    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
+        l4e_from_pfn(virt_to_mfn(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
+}
+
 static int alloc_l4_table(struct page_info *page, int preemptible)
 {
     struct domain *d = page_get_owner(page);
@@ -1358,15 +1370,7 @@ static int alloc_l4_table(struct page_in
         adjust_guest_l4e(pl4e[i], d);
     }
 
-    /* Xen private mappings. */
-    memcpy(&pl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
-    pl4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        l4e_from_pfn(pfn, __PAGE_HYPERVISOR);
-    pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        l4e_from_page(virt_to_page(d->arch.mm_perdomain_l3),
-                      __PAGE_HYPERVISOR);
+    init_guest_l4_table(pl4e, d);
 
     return rc > 0 ? 0 : rc;
 }
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -316,6 +316,8 @@ static inline void *__page_to_virt(const
 int free_page_type(struct page_info *page, unsigned long type,
                    int preemptible);
 
+void init_guest_l4_table(l4_pgentry_t[], const struct domain *);
+
 int is_iomem_page(unsigned long mfn);
 
 void clear_superpage_mark(struct page_info *page);
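
As a design note, init_guest_l4_table() becomes the single place defining
the three Xen-owned pieces of a PV guest L4 table: the Xen private
mappings copied from idle_pg_table, the self-referencing entry backing
the linear page-table mapping at LINEAR_PT_VIRT_START, and the
per-domain mapping slot at PERDOMAIN_VIRT_START (referencing
d->arch.mm_perdomain_l3). Any future change to this layout only needs
to touch the helper.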
