All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Jan Beulich" <JBeulich@suse.com>
To: xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Kevin Tian <kevin.tian@intel.com>, Keir Fraser <keir@xen.org>,
	Jun Nakajima <jun.nakajima@intel.com>
Subject: [PATCH v2] VMX: allocate APIC access page from domain heap
Date: Fri, 18 Dec 2015 00:50:03 -0700	[thread overview]
Message-ID: <5673C8BB02000078000C0FEB@prv-mh.provo.novell.com> (raw)

[-- Attachment #1: Type: text/plain, Size: 3910 bytes --]

... since we don't need its virtual address anywhere (it's only a
placeholder page, after all). For this to work (and possibly be
done elsewhere too) share_xen_page_with_guest() needs to mark pages
handed to it as Xen heap ones.

To be on the safe side, also explicitly clear the page (not having done
so was okay due to the XSA-100 fix, but is still a latent bug since we
don't formally guarantee allocations to come out zeroed, and in fact
this property may disappear again as soon as the asynchronous runtime
scrubbing patches arrive).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Introduce free_shared_domheap_page().
---
Alternatives might be to use a
- global page across VMs (on the basis that VMs shouldn't be accessing
  that page anyway)
- fake MFN pointing into nowhere (would need to ensure no side effects
  can occur, like PCIe errors or NMIs)

--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2489,18 +2489,21 @@ gp_fault:
 
 static int vmx_alloc_vlapic_mapping(struct domain *d)
 {
-    void *apic_va;
+    struct page_info *pg;
+    unsigned long mfn;
 
     if ( !cpu_has_vmx_virtualize_apic_accesses )
         return 0;
 
-    apic_va = alloc_xenheap_page();
-    if ( apic_va == NULL )
+    pg = alloc_domheap_page(d, MEMF_no_owner);
+    if ( !pg )
         return -ENOMEM;
-    share_xen_page_with_guest(virt_to_page(apic_va), d, XENSHARE_writable);
-    d->arch.hvm_domain.vmx.apic_access_mfn = virt_to_mfn(apic_va);
-    set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE),
-        _mfn(virt_to_mfn(apic_va)), p2m_get_hostp2m(d)->default_access);
+    mfn = page_to_mfn(pg);
+    clear_domain_page(_mfn(mfn));
+    share_xen_page_with_guest(pg, d, XENSHARE_writable);
+    d->arch.hvm_domain.vmx.apic_access_mfn = mfn;
+    set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), _mfn(mfn),
+                       p2m_get_hostp2m(d)->default_access);
 
     return 0;
 }
@@ -2508,8 +2511,9 @@ static int vmx_alloc_vlapic_mapping(stru
 static void vmx_free_vlapic_mapping(struct domain *d)
 {
     unsigned long mfn = d->arch.hvm_domain.vmx.apic_access_mfn;
+
     if ( mfn != 0 )
-        free_xenheap_page(mfn_to_virt(mfn));
+        free_shared_domheap_page(mfn_to_page(mfn));
 }
 
 static void vmx_install_vlapic_mapping(struct vcpu *v)
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -454,7 +454,7 @@ void share_xen_page_with_guest(
     /* Only add to the allocation list if the domain isn't dying. */
     if ( !d->is_dying )
     {
-        page->count_info |= PGC_allocated | 1;
+        page->count_info |= PGC_xen_heap | PGC_allocated | 1;
         if ( unlikely(d->xenheap_pages++ == 0) )
             get_knownalive_domain(d);
         page_list_add_tail(page, &d->xenpage_list);
@@ -469,6 +469,17 @@ void share_xen_page_with_privileged_gues
     share_xen_page_with_guest(page, dom_xen, readonly);
 }
 
+void free_shared_domheap_page(struct page_info *page)
+{
+    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+        put_page(page);
+    if ( !test_and_clear_bit(_PGC_xen_heap, &page->count_info) )
+        ASSERT_UNREACHABLE();
+    page->u.inuse.type_info = 0;
+    page_set_owner(page, NULL);
+    free_domheap_page(page);
+}
+
 void make_cr3(struct vcpu *v, unsigned long mfn)
 {
     v->arch.cr3 = mfn << PAGE_SHIFT;
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -276,6 +276,7 @@ extern void share_xen_page_with_guest(
     struct page_info *page, struct domain *d, int readonly);
 extern void share_xen_page_with_privileged_guests(
     struct page_info *page, int readonly);
+extern void free_shared_domheap_page(struct page_info *page);
 
 #define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
 #define spage_table ((struct spage_info *)SPAGETABLE_VIRT_START)




[-- Attachment #2: VMX-domheap-APIC-access-page.patch --]
[-- Type: text/plain, Size: 3955 bytes --]

VMX: allocate APIC access page from domain heap

... since we don't need its virtual address anywhere (it's a
placeholder page only after all). For this to work (and possibly be
done elsewhere too) share_xen_page_with_guest() needs to mark pages
handed to it as Xen heap ones.

To be on the safe side, also explicitly clear the page (not having done
so was okay due to the XSA-100 fix, but is still a latent bug since we
don't formally guarantee allocations to come out zeroed, and in fact
this property may disappear again as soon as the asynchronous runtime
scrubbing patches arrive).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Introduce free_shared_domheap_page().
---
Alternatives might be to use a
- global page across VMs (on the basis that VMs shouldn't be accessing
  that page anyway)
- fake MFN pointing into nowhere (would need to ensure no side effects
  can occur, like PCIe errors or NMIs)

--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2489,18 +2489,21 @@ gp_fault:
 
 static int vmx_alloc_vlapic_mapping(struct domain *d)
 {
-    void *apic_va;
+    struct page_info *pg;
+    unsigned long mfn;
 
     if ( !cpu_has_vmx_virtualize_apic_accesses )
         return 0;
 
-    apic_va = alloc_xenheap_page();
-    if ( apic_va == NULL )
+    pg = alloc_domheap_page(d, MEMF_no_owner);
+    if ( !pg )
         return -ENOMEM;
-    share_xen_page_with_guest(virt_to_page(apic_va), d, XENSHARE_writable);
-    d->arch.hvm_domain.vmx.apic_access_mfn = virt_to_mfn(apic_va);
-    set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE),
-        _mfn(virt_to_mfn(apic_va)), p2m_get_hostp2m(d)->default_access);
+    mfn = page_to_mfn(pg);
+    clear_domain_page(_mfn(mfn));
+    share_xen_page_with_guest(pg, d, XENSHARE_writable);
+    d->arch.hvm_domain.vmx.apic_access_mfn = mfn;
+    set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), _mfn(mfn),
+                       p2m_get_hostp2m(d)->default_access);
 
     return 0;
 }
@@ -2508,8 +2511,9 @@ static int vmx_alloc_vlapic_mapping(stru
 static void vmx_free_vlapic_mapping(struct domain *d)
 {
     unsigned long mfn = d->arch.hvm_domain.vmx.apic_access_mfn;
+
     if ( mfn != 0 )
-        free_xenheap_page(mfn_to_virt(mfn));
+        free_shared_domheap_page(mfn_to_page(mfn));
 }
 
 static void vmx_install_vlapic_mapping(struct vcpu *v)
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -454,7 +454,7 @@ void share_xen_page_with_guest(
     /* Only add to the allocation list if the domain isn't dying. */
     if ( !d->is_dying )
     {
-        page->count_info |= PGC_allocated | 1;
+        page->count_info |= PGC_xen_heap | PGC_allocated | 1;
         if ( unlikely(d->xenheap_pages++ == 0) )
             get_knownalive_domain(d);
         page_list_add_tail(page, &d->xenpage_list);
@@ -469,6 +469,17 @@ void share_xen_page_with_privileged_gues
     share_xen_page_with_guest(page, dom_xen, readonly);
 }
 
+void free_shared_domheap_page(struct page_info *page)
+{
+    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+        put_page(page);
+    if ( !test_and_clear_bit(_PGC_xen_heap, &page->count_info) )
+        ASSERT_UNREACHABLE();
+    page->u.inuse.type_info = 0;
+    page_set_owner(page, NULL);
+    free_domheap_page(page);
+}
+
 void make_cr3(struct vcpu *v, unsigned long mfn)
 {
     v->arch.cr3 = mfn << PAGE_SHIFT;
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -276,6 +276,7 @@ extern void share_xen_page_with_guest(
     struct page_info *page, struct domain *d, int readonly);
 extern void share_xen_page_with_privileged_guests(
     struct page_info *page, int readonly);
+extern void free_shared_domheap_page(struct page_info *page);
 
 #define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
 #define spage_table ((struct spage_info *)SPAGETABLE_VIRT_START)

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

             reply	other threads:[~2015-12-18  7:50 UTC|newest]

Thread overview: 3+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-12-18  7:50 Jan Beulich [this message]
2015-12-18 15:18 ` [PATCH v2] VMX: allocate APIC access page from domain heap Andrew Cooper
2015-12-20  6:57 ` Tian, Kevin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=5673C8BB02000078000C0FEB@prv-mh.provo.novell.com \
    --to=jbeulich@suse.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=jun.nakajima@intel.com \
    --cc=keir@xen.org \
    --cc=kevin.tian@intel.com \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.