All of lore.kernel.org
 help / color / mirror / Atom feed
* [V6 PATCH 0/7]: PVH dom0....
@ 2013-12-06  2:38 Mukesh Rathor
  2013-12-06  2:38 ` [V6 PATCH 1/7] pvh dom0: move some pv specific code to static functions Mukesh Rathor
                   ` (6 more replies)
  0 siblings, 7 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-06  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

Hi,

V6: The only change from V5 is in patch #6:
     - changed comment to reflect autoxlate
     - removed a redundant ASSERT
     - reworked logic a bit so that get_page_from_gfn() is called with NULL
       for p2m type as before. arm has ASSERT wanting it to be NULL.

Tim: patch 4 needs your approval.
Daniel:  patch 5 needs your approval.

These patches implement PVH dom0.

Patches 1 and 2 implement changes in and around construct_dom0.
Patch 7 adds option to boot a dom0 in PVH mode. The rest support tool
stack on a pvh dom0.

These patches are based on c/s: 4b07b3cbf29f66da6090d52e75b5fdae592c6441

These can also be found in public git tree at:

     git://oss.oracle.com/git/mrathor/xen.git  branch: dom0pvh-v6

Thanks for all the help,
Mukesh

^ permalink raw reply	[flat|nested] 46+ messages in thread

* [V6 PATCH 1/7] pvh dom0: move some pv specific code to static functions
  2013-12-06  2:38 [V6 PATCH 0/7]: PVH dom0 Mukesh Rathor
@ 2013-12-06  2:38 ` Mukesh Rathor
  2013-12-06  2:38 ` [V6 PATCH 2/7] pvh dom0: construct_dom0 changes Mukesh Rathor
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-06  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

In this preparatory patch, some pv specific code is
carved out into static functions. No functionality change.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/domain_build.c |  347 +++++++++++++++++++++++--------------------
 1 files changed, 188 insertions(+), 159 deletions(-)

diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 84ce392..f3ae9ad 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -307,6 +307,187 @@ static void __init process_dom0_ioports_disable(void)
     }
 }
 
+static __init void mark_pv_pt_pages_rdonly(struct domain *d,
+                                           l4_pgentry_t *l4start,
+                                           unsigned long vpt_start,
+                                           unsigned long nr_pt_pages)
+{
+    unsigned long count;
+    struct page_info *page;
+    l4_pgentry_t *pl4e;
+    l3_pgentry_t *pl3e;
+    l2_pgentry_t *pl2e;
+    l1_pgentry_t *pl1e;
+
+    pl4e = l4start + l4_table_offset(vpt_start);
+    pl3e = l4e_to_l3e(*pl4e);
+    pl3e += l3_table_offset(vpt_start);
+    pl2e = l3e_to_l2e(*pl3e);
+    pl2e += l2_table_offset(vpt_start);
+    pl1e = l2e_to_l1e(*pl2e);
+    pl1e += l1_table_offset(vpt_start);
+    for ( count = 0; count < nr_pt_pages; count++ )
+    {
+        l1e_remove_flags(*pl1e, _PAGE_RW);
+        page = mfn_to_page(l1e_get_pfn(*pl1e));
+
+        /* Read-only mapping + PGC_allocated + page-table page. */
+        page->count_info         = PGC_allocated | 3;
+        page->u.inuse.type_info |= PGT_validated | 1;
+
+        /* Top-level p.t. is pinned. */
+        if ( (page->u.inuse.type_info & PGT_type_mask) ==
+             (!is_pv_32on64_domain(d) ?
+              PGT_l4_page_table : PGT_l3_page_table) )
+        {
+            page->count_info        += 1;
+            page->u.inuse.type_info += 1 | PGT_pinned;
+        }
+
+        /* Iterate. */
+        if ( !((unsigned long)++pl1e & (PAGE_SIZE - 1)) )
+        {
+            if ( !((unsigned long)++pl2e & (PAGE_SIZE - 1)) )
+            {
+                if ( !((unsigned long)++pl3e & (PAGE_SIZE - 1)) )
+                    pl3e = l4e_to_l3e(*++pl4e);
+                pl2e = l3e_to_l2e(*pl3e);
+            }
+            pl1e = l2e_to_l1e(*pl2e);
+        }
+    }
+}
+
+static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
+                                    unsigned long v_start, unsigned long v_end,
+                                    unsigned long vphysmap_start,
+                                    unsigned long vphysmap_end,
+                                    unsigned long nr_pages)
+{
+    struct page_info *page = NULL;
+    l4_pgentry_t *pl4e, *l4start = map_domain_page(pgtbl_pfn);
+    l3_pgentry_t *pl3e = NULL;
+    l2_pgentry_t *pl2e = NULL;
+    l1_pgentry_t *pl1e = NULL;
+
+    if ( v_start <= vphysmap_end && vphysmap_start <= v_end )
+        panic("DOM0 P->M table overlaps initial mapping");
+
+    while ( vphysmap_start < vphysmap_end )
+    {
+        if ( d->tot_pages + ((round_pgup(vphysmap_end) - vphysmap_start)
+                             >> PAGE_SHIFT) + 3 > nr_pages )
+            panic("Dom0 allocation too small for initial P->M table");
+
+        if ( pl1e )
+        {
+            unmap_domain_page(pl1e);
+            pl1e = NULL;
+        }
+        if ( pl2e )
+        {
+            unmap_domain_page(pl2e);
+            pl2e = NULL;
+        }
+        if ( pl3e )
+        {
+            unmap_domain_page(pl3e);
+            pl3e = NULL;
+        }
+        pl4e = l4start + l4_table_offset(vphysmap_start);
+        if ( !l4e_get_intpte(*pl4e) )
+        {
+            page = alloc_domheap_page(d, 0);
+            if ( !page )
+                break;
+
+            /* No mapping, PGC_allocated + page-table page. */
+            page->count_info = PGC_allocated | 2;
+            page->u.inuse.type_info = PGT_l3_page_table | PGT_validated | 1;
+            pl3e = __map_domain_page(page);
+            clear_page(pl3e);
+            *pl4e = l4e_from_page(page, L4_PROT);
+        } else
+            pl3e = map_domain_page(l4e_get_pfn(*pl4e));
+
+        pl3e += l3_table_offset(vphysmap_start);
+        if ( !l3e_get_intpte(*pl3e) )
+        {
+            if ( cpu_has_page1gb &&
+                 !(vphysmap_start & ((1UL << L3_PAGETABLE_SHIFT) - 1)) &&
+                 vphysmap_end >= vphysmap_start + (1UL << L3_PAGETABLE_SHIFT) &&
+                 (page = alloc_domheap_pages(d,
+                                             L3_PAGETABLE_SHIFT - PAGE_SHIFT,
+                                             0)) != NULL )
+            {
+                *pl3e = l3e_from_page(page, L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
+                vphysmap_start += 1UL << L3_PAGETABLE_SHIFT;
+                continue;
+            }
+            if ( (page = alloc_domheap_page(d, 0)) == NULL )
+                break;
+
+            /* No mapping, PGC_allocated + page-table page. */
+            page->count_info = PGC_allocated | 2;
+            page->u.inuse.type_info = PGT_l2_page_table | PGT_validated | 1;
+            pl2e = __map_domain_page(page);
+            clear_page(pl2e);
+            *pl3e = l3e_from_page(page, L3_PROT);
+        }
+        else
+           pl2e = map_domain_page(l3e_get_pfn(*pl3e));
+
+        pl2e += l2_table_offset(vphysmap_start);
+        if ( !l2e_get_intpte(*pl2e) )
+        {
+            if ( !(vphysmap_start & ((1UL << L2_PAGETABLE_SHIFT) - 1)) &&
+                 vphysmap_end >= vphysmap_start + (1UL << L2_PAGETABLE_SHIFT) &&
+                 (page = alloc_domheap_pages(d,
+                                             L2_PAGETABLE_SHIFT - PAGE_SHIFT,
+                                             0)) != NULL )
+            {
+                *pl2e = l2e_from_page(page, L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
+                if ( opt_allow_superpage )
+                    get_superpage(page_to_mfn(page), d);
+                vphysmap_start += 1UL << L2_PAGETABLE_SHIFT;
+                continue;
+            }
+            if ( (page = alloc_domheap_page(d, 0)) == NULL )
+                break;
+
+            /* No mapping, PGC_allocated + page-table page. */
+            page->count_info = PGC_allocated | 2;
+            page->u.inuse.type_info = PGT_l1_page_table | PGT_validated | 1;
+            pl1e = __map_domain_page(page);
+            clear_page(pl1e);
+            *pl2e = l2e_from_page(page, L2_PROT);
+        }
+        else
+            pl1e = map_domain_page(l2e_get_pfn(*pl2e));
+
+        pl1e += l1_table_offset(vphysmap_start);
+        BUG_ON(l1e_get_intpte(*pl1e));
+        page = alloc_domheap_page(d, 0);
+        if ( !page )
+            break;
+
+        *pl1e = l1e_from_page(page, L1_PROT|_PAGE_DIRTY);
+        vphysmap_start += PAGE_SIZE;
+        vphysmap_start &= PAGE_MASK;
+    }
+    if ( !page )
+        panic("Not enough RAM for DOM0 P->M table");
+
+    if ( pl1e )
+        unmap_domain_page(pl1e);
+    if ( pl2e )
+        unmap_domain_page(pl2e);
+    if ( pl3e )
+        unmap_domain_page(pl3e);
+
+    unmap_domain_page(l4start);
+}
+
 int __init construct_dom0(
     struct domain *d,
     const module_t *image, unsigned long image_headroom,
@@ -706,43 +887,8 @@ int __init construct_dom0(
     }
 
     /* Pages that are part of page tables must be read only. */
-    l4tab = l4start + l4_table_offset(vpt_start);
-    l3start = l3tab = l4e_to_l3e(*l4tab);
-    l3tab += l3_table_offset(vpt_start);
-    l2start = l2tab = l3e_to_l2e(*l3tab);
-    l2tab += l2_table_offset(vpt_start);
-    l1start = l1tab = l2e_to_l1e(*l2tab);
-    l1tab += l1_table_offset(vpt_start);
-    for ( count = 0; count < nr_pt_pages; count++ ) 
-    {
-        l1e_remove_flags(*l1tab, _PAGE_RW);
-        page = mfn_to_page(l1e_get_pfn(*l1tab));
-
-        /* Read-only mapping + PGC_allocated + page-table page. */
-        page->count_info         = PGC_allocated | 3;
-        page->u.inuse.type_info |= PGT_validated | 1;
-
-        /* Top-level p.t. is pinned. */
-        if ( (page->u.inuse.type_info & PGT_type_mask) ==
-             (!is_pv_32on64_domain(d) ?
-              PGT_l4_page_table : PGT_l3_page_table) )
-        {
-            page->count_info        += 1;
-            page->u.inuse.type_info += 1 | PGT_pinned;
-        }
-
-        /* Iterate. */
-        if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
-        {
-            if ( !((unsigned long)++l2tab & (PAGE_SIZE - 1)) )
-            {
-                if ( !((unsigned long)++l3tab & (PAGE_SIZE - 1)) )
-                    l3start = l3tab = l4e_to_l3e(*++l4tab);
-                l2start = l2tab = l3e_to_l2e(*l3tab);
-            }
-            l1start = l1tab = l2e_to_l1e(*l2tab);
-        }
-    }
+    if  ( is_pv_domain(d) )
+        mark_pv_pt_pages_rdonly(d, l4start, vpt_start, nr_pt_pages);
 
     /* Mask all upcalls... */
     for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
@@ -814,132 +960,15 @@ int __init construct_dom0(
              elf_64bit(&elf) ? 64 : 32, parms.pae ? "p" : "");
 
     count = d->tot_pages;
-    l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
-    l3tab = NULL;
-    l2tab = NULL;
-    l1tab = NULL;
+
     /* Set up the phys->machine table if not part of the initial mapping. */
-    if ( parms.p2m_base != UNSET_ADDR )
+    if ( is_pv_domain(d) && parms.p2m_base != UNSET_ADDR )
     {
-        unsigned long va = vphysmap_start;
-
-        if ( v_start <= vphysmap_end && vphysmap_start <= v_end )
-            panic("DOM0 P->M table overlaps initial mapping");
-
-        while ( va < vphysmap_end )
-        {
-            if ( d->tot_pages + ((round_pgup(vphysmap_end) - va)
-                                 >> PAGE_SHIFT) + 3 > nr_pages )
-                panic("Dom0 allocation too small for initial P->M table");
-
-            if ( l1tab )
-            {
-                unmap_domain_page(l1tab);
-                l1tab = NULL;
-            }
-            if ( l2tab )
-            {
-                unmap_domain_page(l2tab);
-                l2tab = NULL;
-            }
-            if ( l3tab )
-            {
-                unmap_domain_page(l3tab);
-                l3tab = NULL;
-            }
-            l4tab = l4start + l4_table_offset(va);
-            if ( !l4e_get_intpte(*l4tab) )
-            {
-                page = alloc_domheap_page(d, 0);
-                if ( !page )
-                    break;
-                /* No mapping, PGC_allocated + page-table page. */
-                page->count_info = PGC_allocated | 2;
-                page->u.inuse.type_info =
-                    PGT_l3_page_table | PGT_validated | 1;
-                l3tab = __map_domain_page(page);
-                clear_page(l3tab);
-                *l4tab = l4e_from_page(page, L4_PROT);
-            } else
-                l3tab = map_domain_page(l4e_get_pfn(*l4tab));
-            l3tab += l3_table_offset(va);
-            if ( !l3e_get_intpte(*l3tab) )
-            {
-                if ( cpu_has_page1gb &&
-                     !(va & ((1UL << L3_PAGETABLE_SHIFT) - 1)) &&
-                     vphysmap_end >= va + (1UL << L3_PAGETABLE_SHIFT) &&
-                     (page = alloc_domheap_pages(d,
-                                                 L3_PAGETABLE_SHIFT -
-                                                     PAGE_SHIFT,
-                                                 0)) != NULL )
-                {
-                    *l3tab = l3e_from_page(page,
-                                           L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
-                    va += 1UL << L3_PAGETABLE_SHIFT;
-                    continue;
-                }
-                if ( (page = alloc_domheap_page(d, 0)) == NULL )
-                    break;
-                /* No mapping, PGC_allocated + page-table page. */
-                page->count_info = PGC_allocated | 2;
-                page->u.inuse.type_info =
-                    PGT_l2_page_table | PGT_validated | 1;
-                l2tab = __map_domain_page(page);
-                clear_page(l2tab);
-                *l3tab = l3e_from_page(page, L3_PROT);
-            }
-            else
-               l2tab = map_domain_page(l3e_get_pfn(*l3tab));
-            l2tab += l2_table_offset(va);
-            if ( !l2e_get_intpte(*l2tab) )
-            {
-                if ( !(va & ((1UL << L2_PAGETABLE_SHIFT) - 1)) &&
-                     vphysmap_end >= va + (1UL << L2_PAGETABLE_SHIFT) &&
-                     (page = alloc_domheap_pages(d,
-                                                 L2_PAGETABLE_SHIFT -
-                                                     PAGE_SHIFT,
-                                                 0)) != NULL )
-                {
-                    *l2tab = l2e_from_page(page,
-                                           L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
-                    if ( opt_allow_superpage )
-                        get_superpage(page_to_mfn(page), d);
-                    va += 1UL << L2_PAGETABLE_SHIFT;
-                    continue;
-                }
-                if ( (page = alloc_domheap_page(d, 0)) == NULL )
-                    break;
-                /* No mapping, PGC_allocated + page-table page. */
-                page->count_info = PGC_allocated | 2;
-                page->u.inuse.type_info =
-                    PGT_l1_page_table | PGT_validated | 1;
-                l1tab = __map_domain_page(page);
-                clear_page(l1tab);
-                *l2tab = l2e_from_page(page, L2_PROT);
-            }
-            else
-                l1tab = map_domain_page(l2e_get_pfn(*l2tab));
-            l1tab += l1_table_offset(va);
-            BUG_ON(l1e_get_intpte(*l1tab));
-            page = alloc_domheap_page(d, 0);
-            if ( !page )
-                break;
-            *l1tab = l1e_from_page(page, L1_PROT|_PAGE_DIRTY);
-            va += PAGE_SIZE;
-            va &= PAGE_MASK;
-        }
-        if ( !page )
-            panic("Not enough RAM for DOM0 P->M table");
+        pfn = pagetable_get_pfn(v->arch.guest_table);
+        setup_pv_physmap(d, pfn, v_start, v_end, vphysmap_start, vphysmap_end,
+                         nr_pages);
     }
 
-    if ( l1tab )
-        unmap_domain_page(l1tab);
-    if ( l2tab )
-        unmap_domain_page(l2tab);
-    if ( l3tab )
-        unmap_domain_page(l3tab);
-    unmap_domain_page(l4start);
-
     /* Write the phys->machine and machine->phys table entries. */
     for ( pfn = 0; pfn < count; pfn++ )
     {
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* [V6 PATCH 2/7] pvh dom0: construct_dom0 changes
  2013-12-06  2:38 [V6 PATCH 0/7]: PVH dom0 Mukesh Rathor
  2013-12-06  2:38 ` [V6 PATCH 1/7] pvh dom0: move some pv specific code to static functions Mukesh Rathor
@ 2013-12-06  2:38 ` Mukesh Rathor
  2013-12-06  2:38 ` [V6 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86 Mukesh Rathor
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-06  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

This patch changes construct_dom0 to boot in PVH mode. Changes
needed to support it are also included here.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/domain_build.c |  235 +++++++++++++++++++++++++++++++++++++++---
 xen/arch/x86/mm/hap/hap.c   |   15 +++
 xen/include/asm-x86/hap.h   |    1 +
 3 files changed, 234 insertions(+), 17 deletions(-)

diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index f3ae9ad..a563c61 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -35,6 +35,7 @@
 #include <asm/setup.h>
 #include <asm/bzimage.h> /* for bzimage_parse */
 #include <asm/io_apic.h>
+#include <asm/hap.h>
 
 #include <public/version.h>
 
@@ -307,6 +308,151 @@ static void __init process_dom0_ioports_disable(void)
     }
 }
 
+static __init void pvh_add_mem_mapping(struct domain *d, unsigned long gfn,
+                                       unsigned long mfn, unsigned long nr_mfns)
+{
+    unsigned long i;
+    for ( i = 0; i < nr_mfns; i++ )
+        if ( !set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i)) )
+            panic("Failed setting p2m. gfn:%lx mfn:%lx i:%ld\n", gfn, mfn, i);
+}
+
+/*
+ * Set the 1:1 map for all non-RAM regions for dom 0. Thus, dom0 will have
+ * the entire io region mapped in the EPT/NPT.
+ *
+ * pvh fixme: The following doesn't map MMIO ranges when they sit above the
+ *            highest E820 covered address.
+ */
+static __init void pvh_map_all_iomem(struct domain *d)
+{
+    unsigned long start_pfn, end_pfn, end = 0, start = 0;
+    const struct e820entry *entry;
+    unsigned int i, nump;
+
+    for ( i = 0, entry = e820.map; i < e820.nr_map; i++, entry++ )
+    {
+        end = entry->addr + entry->size;
+
+        if ( entry->type == E820_RAM || entry->type == E820_UNUSABLE ||
+             i == e820.nr_map - 1 )
+        {
+            start_pfn = PFN_DOWN(start);
+
+            /* Unused RAM areas are marked UNUSABLE, so skip it too */
+            if ( entry->type == E820_RAM || entry->type == E820_UNUSABLE )
+                end_pfn = PFN_UP(entry->addr);
+            else
+                end_pfn = PFN_UP(end);
+
+            if ( start_pfn < end_pfn )
+            {
+                nump = end_pfn - start_pfn;
+                /* Add pages to the mapping */
+                pvh_add_mem_mapping(d, start_pfn, start_pfn, nump);
+            }
+            start = end;
+        }
+    }
+
+    /* If the e820 ended under 4GB, we must map the remaining space upto 4GB */
+    if ( end < GB(4) )
+    {
+        start_pfn = PFN_UP(end);
+        end_pfn = (GB(4)) >> PAGE_SHIFT;
+        nump = end_pfn - start_pfn;
+        pvh_add_mem_mapping(d, start_pfn, start_pfn, nump);
+    }
+}
+
+static __init void dom0_update_physmap(struct domain *d, unsigned long pfn,
+                                   unsigned long mfn, unsigned long vphysmap_s)
+{
+    if ( is_pvh_domain(d) )
+    {
+        int rc = guest_physmap_add_page(d, pfn, mfn, 0);
+        BUG_ON(rc);
+        return;
+    }
+    if ( !is_pv_32on64_domain(d) )
+        ((unsigned long *)vphysmap_s)[pfn] = mfn;
+    else
+        ((unsigned int *)vphysmap_s)[pfn] = mfn;
+
+    set_gpfn_from_mfn(mfn, pfn);
+}
+
+static __init void pvh_fixup_page_tables_for_hap(struct vcpu *v,
+                                                 unsigned long v_start,
+                                                 unsigned long v_end)
+{
+    int i, j, k;
+    l4_pgentry_t *pl4e, *l4start;
+    l3_pgentry_t *pl3e;
+    l2_pgentry_t *pl2e;
+    l1_pgentry_t *pl1e;
+    unsigned long cr3_pfn;
+
+    ASSERT(paging_mode_enabled(v->domain));
+
+    l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+
+    /* Clear entries prior to guest L4 start */
+    pl4e = l4start + l4_table_offset(v_start);
+    memset(l4start, 0, (unsigned long)pl4e - (unsigned long)l4start);
+
+    for ( ; pl4e <= l4start + l4_table_offset(v_end - 1); pl4e++ )
+    {
+        pl3e = map_l3t_from_l4e(*pl4e);
+        for ( i = 0; i < PAGE_SIZE / sizeof(*pl3e); i++, pl3e++ )
+        {
+            if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
+                continue;
+
+            pl2e = map_l2t_from_l3e(*pl3e);
+            for ( j = 0; j < PAGE_SIZE / sizeof(*pl2e); j++, pl2e++ )
+            {
+                if ( !(l2e_get_flags(*pl2e)  & _PAGE_PRESENT) )
+                    continue;
+
+                pl1e = map_l1t_from_l2e(*pl2e);
+                for ( k = 0; k < PAGE_SIZE / sizeof(*pl1e); k++, pl1e++ )
+                {
+                    if ( !(l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
+                        continue;
+
+                    *pl1e = l1e_from_pfn(get_gpfn_from_mfn(l1e_get_pfn(*pl1e)),
+                                         l1e_get_flags(*pl1e));
+                }
+                unmap_domain_page(pl1e);
+                *pl2e = l2e_from_pfn(get_gpfn_from_mfn(l2e_get_pfn(*pl2e)),
+                                     l2e_get_flags(*pl2e));
+            }
+            unmap_domain_page(pl2e);
+            *pl3e = l3e_from_pfn(get_gpfn_from_mfn(l3e_get_pfn(*pl3e)),
+                                 l3e_get_flags(*pl3e));
+        }
+        unmap_domain_page(pl3e);
+        *pl4e = l4e_from_pfn(get_gpfn_from_mfn(l4e_get_pfn(*pl4e)),
+                             l4e_get_flags(*pl4e));
+    }
+
+    /* Clear entries post guest L4. */
+    if ( (unsigned long)pl4e & (PAGE_SIZE - 1) )
+        memset(pl4e, 0, PAGE_SIZE - ((unsigned long)pl4e & (PAGE_SIZE - 1)));
+
+    unmap_domain_page(l4start);
+
+    cr3_pfn = get_gpfn_from_mfn(paddr_to_pfn(v->arch.cr3));
+    v->arch.hvm_vcpu.guest_cr[3] = pfn_to_paddr(cr3_pfn);
+
+    /*
+     * Finally, we update the paging modes (hap_update_paging_modes). This will
+     * create monitor_table for us, update v->arch.cr3, and update vmcs.cr3.
+     */
+    paging_update_paging_modes(v);
+}
+
 static __init void mark_pv_pt_pages_rdonly(struct domain *d,
                                            l4_pgentry_t *l4start,
                                            unsigned long vpt_start,
@@ -516,6 +662,8 @@ int __init construct_dom0(
     l3_pgentry_t *l3tab = NULL, *l3start = NULL;
     l2_pgentry_t *l2tab = NULL, *l2start = NULL;
     l1_pgentry_t *l1tab = NULL, *l1start = NULL;
+    paddr_t shared_info_paddr = 0;
+    u32 save_pvh_pg_mode = 0;
 
     /*
      * This fully describes the memory layout of the initial domain. All 
@@ -593,12 +741,21 @@ int __init construct_dom0(
         goto out;
     }
 
-    if ( parms.elf_notes[XEN_ELFNOTE_SUPPORTED_FEATURES].type != XEN_ENT_NONE &&
-         !test_bit(XENFEAT_dom0, parms.f_supported) )
+    if ( parms.elf_notes[XEN_ELFNOTE_SUPPORTED_FEATURES].type != XEN_ENT_NONE )
     {
-        printk("Kernel does not support Dom0 operation\n");
-        rc = -EINVAL;
-        goto out;
+        if ( !test_bit(XENFEAT_dom0, parms.f_supported) )
+        {
+            printk("Kernel does not support Dom0 operation\n");
+            rc = -EINVAL;
+            goto out;
+        }
+        if ( is_pvh_domain(d) &&
+             !test_bit(XENFEAT_hvm_callback_vector, parms.f_supported) )
+        {
+            printk("Kernel does not support PVH mode\n");
+            rc = -EINVAL;
+            goto out;
+        }
     }
 
     if ( compat32 )
@@ -663,6 +820,13 @@ int __init construct_dom0(
     vstartinfo_end   = (vstartinfo_start +
                         sizeof(struct start_info) +
                         sizeof(struct dom0_vga_console_info));
+
+    if ( is_pvh_domain(d) )
+    {
+        shared_info_paddr = round_pgup(vstartinfo_end) - v_start;
+        vstartinfo_end   += PAGE_SIZE;
+    }
+
     vpt_start        = round_pgup(vstartinfo_end);
     for ( nr_pt_pages = 2; ; nr_pt_pages++ )
     {
@@ -903,6 +1067,13 @@ int __init construct_dom0(
         (void)alloc_vcpu(d, i, cpu);
     }
 
+    /*
+     * pvh: we temporarily disable paging mode so that we can build cr3 needed
+     * to run on dom0's page tables.
+     */
+    save_pvh_pg_mode = d->arch.paging.mode;
+    d->arch.paging.mode = 0;
+
     /* Set up CR3 value for write_ptbase */
     if ( paging_mode_enabled(d) )
         paging_update_paging_modes(v);
@@ -969,6 +1140,15 @@ int __init construct_dom0(
                          nr_pages);
     }
 
+    if ( is_pvh_domain(d) )
+        hap_set_pvh_alloc_for_dom0(d, nr_pages);
+
+    /*
+     * We enable paging mode again so guest_physmap_add_page will do the
+     * right thing for us.
+     */
+    d->arch.paging.mode = save_pvh_pg_mode;
+
     /* Write the phys->machine and machine->phys table entries. */
     for ( pfn = 0; pfn < count; pfn++ )
     {
@@ -985,11 +1165,7 @@ int __init construct_dom0(
         if ( pfn > REVERSE_START && (vinitrd_start || pfn < initrd_pfn) )
             mfn = alloc_epfn - (pfn - REVERSE_START);
 #endif
-        if ( !is_pv_32on64_domain(d) )
-            ((unsigned long *)vphysmap_start)[pfn] = mfn;
-        else
-            ((unsigned int *)vphysmap_start)[pfn] = mfn;
-        set_gpfn_from_mfn(mfn, pfn);
+        dom0_update_physmap(d, pfn, mfn, vphysmap_start);
         if (!(pfn & 0xfffff))
             process_pending_softirqs();
     }
@@ -1005,8 +1181,8 @@ int __init construct_dom0(
             if ( !page->u.inuse.type_info &&
                  !get_page_and_type(page, d, PGT_writable_page) )
                 BUG();
-            ((unsigned long *)vphysmap_start)[pfn] = mfn;
-            set_gpfn_from_mfn(mfn, pfn);
+
+            dom0_update_physmap(d, pfn, mfn, vphysmap_start);
             ++pfn;
             if (!(pfn & 0xfffff))
                 process_pending_softirqs();
@@ -1026,11 +1202,7 @@ int __init construct_dom0(
 #ifndef NDEBUG
 #define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
 #endif
-            if ( !is_pv_32on64_domain(d) )
-                ((unsigned long *)vphysmap_start)[pfn] = mfn;
-            else
-                ((unsigned int *)vphysmap_start)[pfn] = mfn;
-            set_gpfn_from_mfn(mfn, pfn);
+            dom0_update_physmap(d, pfn, mfn, vphysmap_start);
 #undef pfn
             page++; pfn++;
             if (!(pfn & 0xfffff))
@@ -1054,6 +1226,15 @@ int __init construct_dom0(
         si->console.dom0.info_size = sizeof(struct dom0_vga_console_info);
     }
 
+    /*
+     * PVH: We need to update si->shared_info while we are on dom0 page tables,
+     * but need to defer the p2m update until after we have fixed up the
+     * page tables for PVH so that the m2p for the si pte entry returns
+     * correct pfn.
+     */
+    if ( is_pvh_domain(d) )
+        si->shared_info = shared_info_paddr;
+
     if ( is_pv_32on64_domain(d) )
         xlat_start_info(si, XLAT_start_info_console_dom0);
 
@@ -1087,8 +1268,15 @@ int __init construct_dom0(
     regs->eflags = X86_EFLAGS_IF;
 
     if ( opt_dom0_shadow )
+    {
+        if ( is_pvh_domain(d) )
+        {
+            printk("Unsupported option dom0_shadow for PVH\n");
+            return -EINVAL;
+        }
         if ( paging_enable(d, PG_SH_enable) == 0 ) 
             paging_update_paging_modes(v);
+    }
 
     if ( supervisor_mode_kernel )
     {
@@ -1178,6 +1366,19 @@ int __init construct_dom0(
         printk(" Xen warning: dom0 kernel broken ELF: %s\n",
                elf_check_broken(&elf));
 
+    if ( is_pvh_domain(d) )
+    {
+        /* finally, fixup the page table, replacing mfns with pfns */
+        pvh_fixup_page_tables_for_hap(v, v_start, v_end);
+
+        /* the pt has correct pfn for si, now update the mfn in the p2m */
+        mfn = virt_to_mfn(d->shared_info);
+        pfn = shared_info_paddr >> PAGE_SHIFT;
+        dom0_update_physmap(d, pfn, mfn, 0);
+
+        pvh_map_all_iomem(d);
+    }
+
     iommu_dom0_init(dom0);
     return 0;
 
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index d3f64bd..cc3ba66 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -579,6 +579,21 @@ int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
     }
 }
 
+void __init hap_set_pvh_alloc_for_dom0(struct domain *d,
+                                       unsigned long num_pages)
+{
+    int rc;
+    unsigned long memkb = num_pages * (PAGE_SIZE / 1024);
+
+    /* Copied from: libxl_get_required_shadow_memory() */
+    memkb = 4 * (256 * d->max_vcpus + 2 * (memkb / 1024));
+    num_pages = ( (memkb + 1023) / 1024) << (20 - PAGE_SHIFT);
+    paging_lock(d);
+    rc = hap_set_allocation(d, num_pages, NULL);
+    paging_unlock(d);
+    BUG_ON(rc);
+}
+
 static const struct paging_mode hap_paging_real_mode;
 static const struct paging_mode hap_paging_protected_mode;
 static const struct paging_mode hap_paging_pae_mode;
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index e03f983..aab8558 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -63,6 +63,7 @@ int   hap_track_dirty_vram(struct domain *d,
                            XEN_GUEST_HANDLE_64(uint8) dirty_bitmap);
 
 extern const struct paging_mode *hap_paging_get_mode(struct vcpu *);
+void hap_set_pvh_alloc_for_dom0(struct domain *d, unsigned long num_pages);
 
 #endif /* XEN_HAP_H */
 
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* [V6 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-06  2:38 [V6 PATCH 0/7]: PVH dom0 Mukesh Rathor
  2013-12-06  2:38 ` [V6 PATCH 1/7] pvh dom0: move some pv specific code to static functions Mukesh Rathor
  2013-12-06  2:38 ` [V6 PATCH 2/7] pvh dom0: construct_dom0 changes Mukesh Rathor
@ 2013-12-06  2:38 ` Mukesh Rathor
  2013-12-06  2:38 ` [V6 PATCH 4/7] pvh dom0: Introduce p2m_map_foreign Mukesh Rathor
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-06  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

This preparatory patch adds support for XENMEM_add_to_physmap_range
on x86 so it can be used to create a guest on PVH dom0. To this end, we
add a new function xenmem_add_to_physmap_range(), and change
xenmem_add_to_physmap_once parameters so it can be called from
xenmem_add_to_physmap_range.

Please note, compat will continue to return -ENOSYS.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/mm.c |   65 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 65 insertions(+), 0 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 6c26026..4ae4523 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4675,6 +4675,39 @@ static int xenmem_add_to_physmap(struct domain *d,
     return xenmem_add_to_physmap_once(d, xatp);
 }
 
+static int xenmem_add_to_physmap_range(struct domain *d,
+                                       struct xen_add_to_physmap_range *xatpr)
+{
+    /* Process entries in reverse order to allow continuations */
+    while ( xatpr->size > 0 )
+    {
+        int rc;
+        xen_ulong_t idx;
+        xen_pfn_t gpfn;
+        struct xen_add_to_physmap xatp;
+
+        if ( copy_from_guest_offset(&idx, xatpr->idxs, xatpr->size-1, 1)  ||
+             copy_from_guest_offset(&gpfn, xatpr->gpfns, xatpr->size-1, 1) )
+            return -EFAULT;
+
+        xatp.space = xatpr->space;
+        xatp.idx = idx;
+        xatp.gpfn = gpfn;
+        rc = xenmem_add_to_physmap_once(d, &xatp);
+
+        if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
+            return -EFAULT;
+
+        xatpr->size--;
+
+        /* Check for continuation if it's not the last iteration */
+        if ( xatpr->size > 0 && hypercall_preempt_check() )
+            return -EAGAIN;
+    }
+
+    return 0;
+}
+
 long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     int rc;
@@ -4689,6 +4722,10 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( copy_from_guest(&xatp, arg, 1) )
             return -EFAULT;
 
+        /* This one is only supported for add_to_physmap_range for now */
+        if ( xatp.space == XENMAPSPACE_gmfn_foreign )
+            return -EINVAL;
+
         d = rcu_lock_domain_by_any_id(xatp.domid);
         if ( d == NULL )
             return -ESRCH;
@@ -4716,6 +4753,34 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         return rc;
     }
 
+    case XENMEM_add_to_physmap_range:
+    {
+        struct xen_add_to_physmap_range xatpr;
+        struct domain *d;
+
+        if ( copy_from_guest(&xatpr, arg, 1) )
+            return -EFAULT;
+
+        /* This mapspace is redundant for this hypercall */
+        if ( xatpr.space == XENMAPSPACE_gmfn_range )
+            return -EINVAL;
+
+        d = rcu_lock_domain_by_any_id(xatpr.domid);
+        if ( d == NULL )
+            return -ESRCH;
+
+        if ( (rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d)) == 0 )
+            rc = xenmem_add_to_physmap_range(d, &xatpr);
+
+        rcu_unlock_domain(d);
+
+        if ( rc == -EAGAIN )
+            rc = hypercall_create_continuation(
+                __HYPERVISOR_memory_op, "ih", op, arg);
+
+        return rc;
+    }
+
     case XENMEM_set_memory_map:
     {
         struct xen_foreign_memory_map fmap;
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* [V6 PATCH 4/7] pvh dom0: Introduce p2m_map_foreign
  2013-12-06  2:38 [V6 PATCH 0/7]: PVH dom0 Mukesh Rathor
                   ` (2 preceding siblings ...)
  2013-12-06  2:38 ` [V6 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86 Mukesh Rathor
@ 2013-12-06  2:38 ` Mukesh Rathor
  2013-12-09 12:02   ` Tim Deegan
  2013-12-06  2:38 ` [V6 PATCH 5/7] pvh: change xsm_add_to_physmap Mukesh Rathor
                   ` (2 subsequent siblings)
  6 siblings, 1 reply; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-06  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

In this patch, a new type, p2m_map_foreign, is introduced for pages
that the toolstack on PVH dom0 maps from foreign domains that it is creating
or supporting during its runtime.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/mm/p2m-ept.c |    1 +
 xen/arch/x86/mm/p2m-pt.c  |    1 +
 xen/arch/x86/mm/p2m.c     |   28 ++++++++++++++++++++--------
 xen/include/asm-x86/p2m.h |    4 ++++
 4 files changed, 26 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 92d9e2d..08d1d72 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -75,6 +75,7 @@ static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type, p2m_acces
             entry->w = 0;
             break;
         case p2m_grant_map_rw:
+        case p2m_map_foreign:
             entry->r = entry->w = 1;
             entry->x = 0;
             break;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index a1d5650..09b60ce 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -89,6 +89,7 @@ static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn)
     case p2m_ram_rw:
         return flags | P2M_BASE_FLAGS | _PAGE_RW;
     case p2m_grant_map_rw:
+    case p2m_map_foreign:
         return flags | P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
     case p2m_mmio_direct:
         if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 8f380ed..0659ef1 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -525,7 +525,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
         for ( i = 0; i < (1UL << page_order); i++ )
         {
             mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL);
-            if ( !p2m_is_grant(t) && !p2m_is_shared(t) )
+            if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
                 set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
             ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
         }
@@ -756,10 +756,9 @@ void p2m_change_type_range(struct domain *d,
     p2m_unlock(p2m);
 }
 
-
-
-int
-set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
+static int
+set_typed_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
+                    p2m_type_t gfn_p2mt)
 {
     int rc = 0;
     p2m_access_t a;
@@ -784,16 +783,29 @@ set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
         set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
     }
 
-    P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
-    rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_mmio_direct, p2m->default_access);
+    P2M_DEBUG("set %d %lx %lx\n", gfn_p2mt, gfn, mfn_x(mfn));
+    rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, gfn_p2mt,
+                       p2m->default_access);
     gfn_unlock(p2m, gfn, 0);
     if ( 0 == rc )
         gdprintk(XENLOG_ERR,
-            "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
+            "%s: set_p2m_entry failed! mfn=%08lx\n", __func__,
             mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)));
     return rc;
 }
 
+/* Returns: True for success. */
+int set_foreign_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
+{
+    return set_typed_p2m_entry(d, gfn, mfn, p2m_map_foreign);
+}
+
+int
+set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
+{
+    return set_typed_p2m_entry(d, gfn, mfn, p2m_mmio_direct);
+}
+
 int
 clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
 {
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 43583b2..6fc71a1 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -70,6 +70,7 @@ typedef enum {
     p2m_ram_paging_in = 11,       /* Memory that is being paged in */
     p2m_ram_shared = 12,          /* Shared or sharable memory */
     p2m_ram_broken = 13,          /* Broken page, access cause domain crash */
+    p2m_map_foreign  = 14,        /* ram pages from foreign domain */
 } p2m_type_t;
 
 /*
@@ -180,6 +181,7 @@ typedef unsigned int p2m_query_t;
 #define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES)
 #define p2m_is_shared(_t)   (p2m_to_mask(_t) & P2M_SHARED_TYPES)
 #define p2m_is_broken(_t)   (p2m_to_mask(_t) & P2M_BROKEN_TYPES)
+#define p2m_is_foreign(_t)  (p2m_to_mask(_t) & p2m_to_mask(p2m_map_foreign))
 
 /* Per-p2m-table state */
 struct p2m_domain {
@@ -510,6 +512,8 @@ p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
 int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
 int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
 
+/* Set foreign mfn in the current guest's p2m table. */
+int set_foreign_p2m_entry(struct domain *domp, unsigned long gfn, mfn_t mfn);
 
 /* 
  * Populate-on-demand
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* [V6 PATCH 5/7] pvh: change xsm_add_to_physmap
  2013-12-06  2:38 [V6 PATCH 0/7]: PVH dom0 Mukesh Rathor
                   ` (3 preceding siblings ...)
  2013-12-06  2:38 ` [V6 PATCH 4/7] pvh dom0: Introduce p2m_map_foreign Mukesh Rathor
@ 2013-12-06  2:38 ` Mukesh Rathor
  2013-12-06  2:38 ` [V6 PATCH 6/7] pvh dom0: Add and remove foreign pages Mukesh Rathor
  2013-12-06  2:38 ` [V6 PATCH 7/7] pvh dom0: add opt_dom0pvh to setup.c Mukesh Rathor
  6 siblings, 0 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-06  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, dgdegra, keir.xen, tim, JBeulich

In preparation for the next patch, we update xsm_add_to_physmap to
allow checking of the foreign domain. Thus, the current domain must
have the right to update the mappings of the target domain with pages from
a foreign domain.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/arm/mm.c       |    4 ++--
 xen/arch/x86/mm.c       |   18 +++++++++++++++---
 xen/include/xsm/dummy.h |   10 ++++++++--
 xen/include/xsm/xsm.h   |    6 +++---
 xen/xsm/flask/hooks.c   |    9 +++++++--
 5 files changed, 35 insertions(+), 12 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index e6753fe..c776cfe 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1122,7 +1122,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
+        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL);
         if ( rc )
         {
             rcu_unlock_domain(d);
@@ -1153,7 +1153,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
+        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL);
         if ( rc )
         {
             rcu_unlock_domain(d);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 4ae4523..e3da479 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4730,7 +4730,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        if ( xsm_add_to_physmap(XSM_TARGET, current->domain, d) )
+        if ( xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL) )
         {
             rcu_unlock_domain(d);
             return -EPERM;
@@ -4756,7 +4756,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
     case XENMEM_add_to_physmap_range:
     {
         struct xen_add_to_physmap_range xatpr;
-        struct domain *d;
+        struct domain *d, *fd = NULL;
 
         if ( copy_from_guest(&xatpr, arg, 1) )
             return -EFAULT;
@@ -4769,10 +4769,22 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        if ( (rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d)) == 0 )
+        if ( xatpr.space == XENMAPSPACE_gmfn_foreign )
+        {
+            fd = get_pg_owner(xatpr.foreign_domid);
+            if ( fd == NULL )
+            {
+                rcu_unlock_domain(d);
+                return -ESRCH;
+            }
+        }
+        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
+        if ( rc == 0 )
             rc = xenmem_add_to_physmap_range(d, &xatpr);
 
         rcu_unlock_domain(d);
+        if ( fd )
+            put_pg_owner(fd);
 
         if ( rc == -EAGAIN )
             rc = hypercall_create_continuation(
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index eb9e1a1..1228e52 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -467,10 +467,16 @@ static XSM_INLINE int xsm_pci_config_permission(XSM_DEFAULT_ARG struct domain *d
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_add_to_physmap(XSM_DEFAULT_ARG struct domain *d1, struct domain *d2)
+static XSM_INLINE int xsm_add_to_physmap(XSM_DEFAULT_ARG struct domain *d, struct domain *t, struct domain *f)
 {
+    int rc;
+
     XSM_ASSERT_ACTION(XSM_TARGET);
-    return xsm_default_action(action, d1, d2);
+    rc = xsm_default_action(action, d, t);
+    if ( f && !rc )
+        rc = xsm_default_action(action, d, f);
+
+    return rc;
 }
 
 static XSM_INLINE int xsm_remove_from_physmap(XSM_DEFAULT_ARG struct domain *d1, struct domain *d2)
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 1939453..9ee9543 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -90,7 +90,7 @@ struct xsm_operations {
     int (*memory_adjust_reservation) (struct domain *d1, struct domain *d2);
     int (*memory_stat_reservation) (struct domain *d1, struct domain *d2);
     int (*memory_pin_page) (struct domain *d1, struct domain *d2, struct page_info *page);
-    int (*add_to_physmap) (struct domain *d1, struct domain *d2);
+    int (*add_to_physmap) (struct domain *d, struct domain *t, struct domain *f);
     int (*remove_from_physmap) (struct domain *d1, struct domain *d2);
     int (*claim_pages) (struct domain *d);
 
@@ -344,9 +344,9 @@ static inline int xsm_memory_pin_page(xsm_default_t def, struct domain *d1, stru
     return xsm_ops->memory_pin_page(d1, d2, page);
 }
 
-static inline int xsm_add_to_physmap(xsm_default_t def, struct domain *d1, struct domain *d2)
+static inline int xsm_add_to_physmap(xsm_default_t def, struct domain *d, struct domain *t, struct domain *f)
 {
-    return xsm_ops->add_to_physmap(d1, d2);
+    return xsm_ops->add_to_physmap(d, t, f);
 }
 
 static inline int xsm_remove_from_physmap(xsm_default_t def, struct domain *d1, struct domain *d2)
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 7cdef04..81294b1 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1068,9 +1068,14 @@ static inline int flask_tmem_control(void)
     return domain_has_xen(current->domain, XEN__TMEM_CONTROL);
 }
 
-static int flask_add_to_physmap(struct domain *d1, struct domain *d2)
+static int flask_add_to_physmap(struct domain *d, struct domain *t, struct domain *f)
 {
-    return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PHYSMAP);
+    int rc;
+
+    rc = domain_has_perm(d, t, SECCLASS_MMU, MMU__PHYSMAP);
+    if ( f && !rc )
+        rc = domain_has_perm(d, f, SECCLASS_MMU, MMU__MAP_READ|MMU__MAP_WRITE);
+    return rc;
 }
 
 static int flask_remove_from_physmap(struct domain *d1, struct domain *d2)
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* [V6 PATCH 6/7] pvh dom0: Add and remove foreign pages
  2013-12-06  2:38 [V6 PATCH 0/7]: PVH dom0 Mukesh Rathor
                   ` (4 preceding siblings ...)
  2013-12-06  2:38 ` [V6 PATCH 5/7] pvh: change xsm_add_to_physmap Mukesh Rathor
@ 2013-12-06  2:38 ` Mukesh Rathor
  2013-12-06  2:54   ` Mukesh Rathor
                     ` (3 more replies)
  2013-12-06  2:38 ` [V6 PATCH 7/7] pvh dom0: add opt_dom0pvh to setup.c Mukesh Rathor
  6 siblings, 4 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-06  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

In this patch, a new function, xenmem_add_foreign_to_p2m(), is added
to map pages from foreign guest into current dom0 for domU creation.
Such pages are typed p2m_map_foreign. Also, support is added here to
XENMEM_remove_from_physmap to remove such pages. Note, in the remove
path, we must release the refcount that was taken during the map phase.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/mm.c         |   88 +++++++++++++++++++++++++++++++++++++++++----
 xen/common/memory.c       |   37 ++++++++++++++++++-
 xen/include/asm-arm/p2m.h |    2 +
 3 files changed, 118 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e3da479..1a4d564 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2810,7 +2810,7 @@ static struct domain *get_pg_owner(domid_t domid)
         goto out;
     }
 
-    if ( unlikely(paging_mode_translate(curr)) )
+    if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
     {
         MEM_LOG("Cannot mix foreign mappings with translated domains");
         goto out;
@@ -4520,9 +4520,75 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
     return 0;
 }
 
+/*
+ * Add frames from foreign domain to target domain's physmap. Similar to
+ * XENMAPSPACE_gmfn but the frame is foreign being mapped into current,
+ * and is not removed from foreign domain.
+ * Usage: libxl on pvh dom0 creating a guest and doing privcmd_ioctl_mmap.
+ * Side Effect: the mfn for fgfn will be refcounted so it is not lost
+ *              while mapped here. The refcnt is released in do_memory_op()
+ *              via XENMEM_remove_from_physmap.
+ * Returns: 0 ==> success
+ */
+static int xenmem_add_foreign_to_p2m(struct domain *tdom, unsigned long fgfn,
+                                     unsigned long gpfn, struct domain *fdom)
+{
+    p2m_type_t p2mt, p2mt_prev;
+    int rc = 0;
+    unsigned long prev_mfn, mfn = 0;
+    struct page_info *page = NULL;
+
+    if ( tdom == fdom || !tdom || !fdom || !is_pvh_domain(tdom) )
+        return -EINVAL;
+
+    /* following will take a refcnt on the mfn */
+    page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
+    if ( !page || !p2m_is_valid(p2mt) )
+    {
+        if ( page )
+            put_page(page);
+        return -EINVAL;
+    }
+    mfn = page_to_mfn(page);
+
+    /* Remove previously mapped page if it is present. */
+    prev_mfn = mfn_x(get_gfn(tdom, gpfn, &p2mt_prev));
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(prev_mfn) )
+            /* Xen heap frames are simply unhooked from this phys slot */
+            guest_physmap_remove_page(tdom, gpfn, prev_mfn, 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(tdom, gpfn);
+    }
+    /*
+     * Create the new mapping. Can't use guest_physmap_add_page() because it
+     * will update the m2p table which will result in  mfn -> gpfn of dom0
+     * and not fgfn of domU.
+     */
+    if ( set_foreign_p2m_entry(tdom, gpfn, _mfn(mfn)) == 0 )
+    {
+        gdprintk(XENLOG_WARNING, "set_foreign_p2m_entry failed. "
+                 "gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
+                 gpfn, mfn, fgfn, tdom->domain_id, fdom->domain_id);
+        put_page(page);
+        rc = -EINVAL;
+    }
+
+    /*
+     * We must do this put_gfn after set_foreign_p2m_entry so another cpu
+     * doesn't populate the gpfn before us.
+     */
+    put_gfn(tdom, gpfn);
+
+    return rc;
+}
+
 static int xenmem_add_to_physmap_once(
     struct domain *d,
-    const struct xen_add_to_physmap *xatp)
+    const struct xen_add_to_physmap *xatp,
+    struct domain *fdom)
 {
     struct page_info *page = NULL;
     unsigned long gfn = 0; /* gcc ... */
@@ -4581,6 +4647,13 @@ static int xenmem_add_to_physmap_once(
             page = mfn_to_page(mfn);
             break;
         }
+
+        case XENMAPSPACE_gmfn_foreign:
+        {
+            rc = xenmem_add_foreign_to_p2m(d, xatp->idx, xatp->gpfn, fdom);
+            return rc;
+        }
+
         default:
             break;
     }
@@ -4646,7 +4719,7 @@ static int xenmem_add_to_physmap(struct domain *d,
         start_xatp = *xatp;
         while ( xatp->size > 0 )
         {
-            rc = xenmem_add_to_physmap_once(d, xatp);
+            rc = xenmem_add_to_physmap_once(d, xatp, NULL);
             if ( rc < 0 )
                 return rc;
 
@@ -4672,11 +4745,12 @@ static int xenmem_add_to_physmap(struct domain *d,
         return rc;
     }
 
-    return xenmem_add_to_physmap_once(d, xatp);
+    return xenmem_add_to_physmap_once(d, xatp, NULL);
 }
 
 static int xenmem_add_to_physmap_range(struct domain *d,
-                                       struct xen_add_to_physmap_range *xatpr)
+                                       struct xen_add_to_physmap_range *xatpr,
+                                       struct domain *fdom)
 {
     /* Process entries in reverse order to allow continuations */
     while ( xatpr->size > 0 )
@@ -4693,7 +4767,7 @@ static int xenmem_add_to_physmap_range(struct domain *d,
         xatp.space = xatpr->space;
         xatp.idx = idx;
         xatp.gpfn = gpfn;
-        rc = xenmem_add_to_physmap_once(d, &xatp);
+        rc = xenmem_add_to_physmap_once(d, &xatp, fdom);
 
         if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
             return -EFAULT;
@@ -4780,7 +4854,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         }
         rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
         if ( rc == 0 )
-            rc = xenmem_add_to_physmap_range(d, &xatpr);
+            rc = xenmem_add_to_physmap_range(d, &xatpr, fd);
 
         rcu_unlock_domain(d);
         if ( fd )
diff --git a/xen/common/memory.c b/xen/common/memory.c
index eb7b72b..7103c8b 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -675,9 +675,11 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 
     case XENMEM_remove_from_physmap:
     {
+        unsigned long mfn;
         struct xen_remove_from_physmap xrfp;
         struct page_info *page;
         struct domain *d;
+        p2m_type_t p2mt = -1;
 
         if ( copy_from_guest(&xrfp, arg, 1) )
             return -EFAULT;
@@ -693,11 +695,42 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             return rc;
         }
 
+        /*
+         * If autotranslate guest, (eg pvh), the gfn could be mapped to a mfn
+         * from foreign domain by the user space tool during domain creation.
+         * We need to check for that, free it up from the p2m, and release
+         * refcnt on it. In such a case, page would be NULL and the following
+         * call would not have refcnt'd the page.
+         * See also xenmem_add_foreign_to_p2m().
+         */
         page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
         if ( page )
+            mfn = page_to_mfn(page);
+#ifdef CONFIG_X86
+        else
         {
-            guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
-            put_page(page);
+            mfn = mfn_x(get_gfn_query(d, xrfp.gpfn, &p2mt));
+            if ( p2m_is_foreign(p2mt) )
+            {
+                struct domain *foreign_dom;
+
+                foreign_dom = page_get_owner(mfn_to_page(mfn));
+                ASSERT(is_pvh_domain(d));
+                ASSERT(d != foreign_dom);
+            }
+        }
+#endif
+        if ( page || p2m_is_foreign(p2mt) )
+        {
+            guest_physmap_remove_page(d, xrfp.gpfn, mfn, 0);
+            if ( page )
+                put_page(page);
+
+            if ( p2m_is_foreign(p2mt) )
+            {
+                put_page(mfn_to_page(mfn));
+                put_gfn(d, xrfp.gpfn);
+            }
         }
         else
             rc = -ENOENT;
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index c660820..f079f00 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -110,6 +110,8 @@ static inline int get_page_and_type(struct page_info *page,
     return rc;
 }
 
+#define p2m_is_foreign(_t) (0)
+
 #endif /* _XEN_P2M_H */
 
 /*
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* [V6 PATCH 7/7] pvh dom0: add opt_dom0pvh to setup.c
  2013-12-06  2:38 [V6 PATCH 0/7]: PVH dom0 Mukesh Rathor
                   ` (5 preceding siblings ...)
  2013-12-06  2:38 ` [V6 PATCH 6/7] pvh dom0: Add and remove foreign pages Mukesh Rathor
@ 2013-12-06  2:38 ` Mukesh Rathor
  6 siblings, 0 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-06  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/setup.c |   12 +++++++++---
 1 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index f07ee2b..7eaac85 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -61,6 +61,10 @@ integer_param("maxcpus", max_cpus);
 static bool_t __initdata disable_smep;
 invbool_param("smep", disable_smep);
 
+/* Boot dom0 in pvh mode */
+bool_t __initdata opt_dom0pvh;
+boolean_param("dom0pvh", opt_dom0pvh);
+
 /* **** Linux config option: propagated to domain0. */
 /* "acpi=off":    Sisables both ACPI table parsing and interpreter. */
 /* "acpi=force":  Override the disable blacklist.                   */
@@ -545,7 +549,7 @@ void __init __start_xen(unsigned long mbi_p)
 {
     char *memmap_type = NULL;
     char *cmdline, *kextra, *loader;
-    unsigned int initrdidx;
+    unsigned int initrdidx, domcr_flags = 0;
     multiboot_info_t *mbi = __va(mbi_p);
     module_t *mod = (module_t *)__va(mbi->mods_addr);
     unsigned long nr_pages, raw_max_page, modules_headroom, *module_map;
@@ -1332,8 +1336,10 @@ void __init __start_xen(unsigned long mbi_p)
     if ( !tboot_protect_mem_regions() )
         panic("Could not protect TXT memory regions");
 
-    /* Create initial domain 0. */
-    dom0 = domain_create(0, DOMCRF_s3_integrity, 0);
+     /* Create initial domain 0. */
+     domcr_flags = (opt_dom0pvh ? DOMCRF_pvh | DOMCRF_hap : 0);
+     domcr_flags |= DOMCRF_s3_integrity;
+     dom0 = domain_create(0, domcr_flags, 0);
     if ( IS_ERR(dom0) || (alloc_dom0_vcpu0() == NULL) )
         panic("Error creating domain 0");
 
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6/7] pvh dom0: Add and remove foreign pages
  2013-12-06  2:38 ` [V6 PATCH 6/7] pvh dom0: Add and remove foreign pages Mukesh Rathor
@ 2013-12-06  2:54   ` Mukesh Rathor
  2013-12-06 11:46     ` Jan Beulich
  2013-12-07  2:34   ` [V6 PATCH 6.1/7] " Mukesh Rathor
                     ` (2 subsequent siblings)
  3 siblings, 1 reply; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-06  2:54 UTC (permalink / raw)
  To: Mukesh Rathor; +Cc: george.dunlap, Xen-devel, tim, keir.xen, JBeulich

On Thu,  5 Dec 2013 18:38:43 -0800
Mukesh Rathor <mukesh.rathor@oracle.com> wrote:

> In this patch, a new function, xenmem_add_foreign_to_p2m(), is added
> to map pages from foreign guest into current dom0 for domU creation.
> Such pages are typed p2m_map_foreign. Also, support is added here to
> XENMEM_remove_from_physmap to remove such pages. Note, in the remove
> path, we must release the refcount that was taken during the map
> phase.

Actually, the build now breaks on arm because of an unused p2mt. So here's the
fixed-up patch with a modified include/asm-arm/p2m.h:

-----------

In this patch, a new function, xenmem_add_foreign_to_p2m(), is added
to map pages from foreign guest into current dom0 for domU creation.
Such pages are typed p2m_map_foreign. Also, support is added here to
XENMEM_remove_from_physmap to remove such pages. Note, in the remove
path, we must release the refcount that was taken during the map phase.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/mm.c         |   88 +++++++++++++++++++++++++++++++++++++++++----
 xen/common/memory.c       |   37 ++++++++++++++++++-
 xen/include/asm-arm/p2m.h |    2 +
 3 files changed, 118 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e3da479..1a4d564 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2810,7 +2810,7 @@ static struct domain *get_pg_owner(domid_t domid)
         goto out;
     }
 
-    if ( unlikely(paging_mode_translate(curr)) )
+    if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
     {
         MEM_LOG("Cannot mix foreign mappings with translated domains");
         goto out;
@@ -4520,9 +4520,75 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
     return 0;
 }
 
+/*
+ * Add frames from foreign domain to target domain's physmap. Similar to
+ * XENMAPSPACE_gmfn but the frame is foreign being mapped into current,
+ * and is not removed from foreign domain.
+ * Usage: libxl on pvh dom0 creating a guest and doing privcmd_ioctl_mmap.
+ * Side Effect: the mfn for fgfn will be refcounted so it is not lost
+ *              while mapped here. The refcnt is released in do_memory_op()
+ *              via XENMEM_remove_from_physmap.
+ * Returns: 0 ==> success
+ */
+static int xenmem_add_foreign_to_p2m(struct domain *tdom, unsigned long fgfn,
+                                     unsigned long gpfn, struct domain *fdom)
+{
+    p2m_type_t p2mt, p2mt_prev;
+    int rc = 0;
+    unsigned long prev_mfn, mfn = 0;
+    struct page_info *page = NULL;
+
+    if ( tdom == fdom || !tdom || !fdom || !is_pvh_domain(tdom) )
+        return -EINVAL;
+
+    /* following will take a refcnt on the mfn */
+    page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
+    if ( !page || !p2m_is_valid(p2mt) )
+    {
+        if ( page )
+            put_page(page);
+        return -EINVAL;
+    }
+    mfn = page_to_mfn(page);
+
+    /* Remove previously mapped page if it is present. */
+    prev_mfn = mfn_x(get_gfn(tdom, gpfn, &p2mt_prev));
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(prev_mfn) )
+            /* Xen heap frames are simply unhooked from this phys slot */
+            guest_physmap_remove_page(tdom, gpfn, prev_mfn, 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(tdom, gpfn);
+    }
+    /*
+     * Create the new mapping. Can't use guest_physmap_add_page() because it
+     * will update the m2p table which will result in  mfn -> gpfn of dom0
+     * and not fgfn of domU.
+     */
+    if ( set_foreign_p2m_entry(tdom, gpfn, _mfn(mfn)) == 0 )
+    {
+        gdprintk(XENLOG_WARNING, "set_foreign_p2m_entry failed. "
+                 "gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
+                 gpfn, mfn, fgfn, tdom->domain_id, fdom->domain_id);
+        put_page(page);
+        rc = -EINVAL;
+    }
+
+    /*
+     * We must do this put_gfn after set_foreign_p2m_entry so another cpu
+     * doesn't populate the gpfn before us.
+     */
+    put_gfn(tdom, gpfn);
+
+    return rc;
+}
+
 static int xenmem_add_to_physmap_once(
     struct domain *d,
-    const struct xen_add_to_physmap *xatp)
+    const struct xen_add_to_physmap *xatp,
+    struct domain *fdom)
 {
     struct page_info *page = NULL;
     unsigned long gfn = 0; /* gcc ... */
@@ -4581,6 +4647,13 @@ static int xenmem_add_to_physmap_once(
             page = mfn_to_page(mfn);
             break;
         }
+
+        case XENMAPSPACE_gmfn_foreign:
+        {
+            rc = xenmem_add_foreign_to_p2m(d, xatp->idx, xatp->gpfn, fdom);
+            return rc;
+        }
+
         default:
             break;
     }
@@ -4646,7 +4719,7 @@ static int xenmem_add_to_physmap(struct domain *d,
         start_xatp = *xatp;
         while ( xatp->size > 0 )
         {
-            rc = xenmem_add_to_physmap_once(d, xatp);
+            rc = xenmem_add_to_physmap_once(d, xatp, NULL);
             if ( rc < 0 )
                 return rc;
 
@@ -4672,11 +4745,12 @@ static int xenmem_add_to_physmap(struct domain *d,
         return rc;
     }
 
-    return xenmem_add_to_physmap_once(d, xatp);
+    return xenmem_add_to_physmap_once(d, xatp, NULL);
 }
 
 static int xenmem_add_to_physmap_range(struct domain *d,
-                                       struct xen_add_to_physmap_range *xatpr)
+                                       struct xen_add_to_physmap_range *xatpr,
+                                       struct domain *fdom)
 {
     /* Process entries in reverse order to allow continuations */
     while ( xatpr->size > 0 )
@@ -4693,7 +4767,7 @@ static int xenmem_add_to_physmap_range(struct domain *d,
         xatp.space = xatpr->space;
         xatp.idx = idx;
         xatp.gpfn = gpfn;
-        rc = xenmem_add_to_physmap_once(d, &xatp);
+        rc = xenmem_add_to_physmap_once(d, &xatp, fdom);
 
         if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
             return -EFAULT;
@@ -4780,7 +4854,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         }
         rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
         if ( rc == 0 )
-            rc = xenmem_add_to_physmap_range(d, &xatpr);
+            rc = xenmem_add_to_physmap_range(d, &xatpr, fd);
 
         rcu_unlock_domain(d);
         if ( fd )
diff --git a/xen/common/memory.c b/xen/common/memory.c
index eb7b72b..7103c8b 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -675,9 +675,11 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 
     case XENMEM_remove_from_physmap:
     {
+        unsigned long mfn;
         struct xen_remove_from_physmap xrfp;
         struct page_info *page;
         struct domain *d;
+        p2m_type_t p2mt = -1;
 
         if ( copy_from_guest(&xrfp, arg, 1) )
             return -EFAULT;
@@ -693,11 +695,42 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             return rc;
         }
 
+        /*
+         * If autotranslate guest, (eg pvh), the gfn could be mapped to a mfn
+         * from foreign domain by the user space tool during domain creation.
+         * We need to check for that, free it up from the p2m, and release
+         * refcnt on it. In such a case, page would be NULL and the following
+         * call would not have refcnt'd the page.
+         * See also xenmem_add_foreign_to_p2m().
+         */
         page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
         if ( page )
+            mfn = page_to_mfn(page);
+#ifdef CONFIG_X86
+        else
         {
-            guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
-            put_page(page);
+            mfn = mfn_x(get_gfn_query(d, xrfp.gpfn, &p2mt));
+            if ( p2m_is_foreign(p2mt) )
+            {
+                struct domain *foreign_dom;
+
+                foreign_dom = page_get_owner(mfn_to_page(mfn));
+                ASSERT(is_pvh_domain(d));
+                ASSERT(d != foreign_dom);
+            }
+        }
+#endif
+        if ( page || p2m_is_foreign(p2mt) )
+        {
+            guest_physmap_remove_page(d, xrfp.gpfn, mfn, 0);
+            if ( page )
+                put_page(page);
+
+            if ( p2m_is_foreign(p2mt) )
+            {
+                put_page(mfn_to_page(mfn));
+                put_gfn(d, xrfp.gpfn);
+            }
         }
         else
             rc = -ENOENT;
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index c660820..ac109f8 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -110,6 +110,8 @@ static inline int get_page_and_type(struct page_info *page,
     return rc;
 }
 
+#define p2m_is_foreign(_t) ( 0 && (_t) )
+
 #endif /* _XEN_P2M_H */
 
 /*
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6/7] pvh dom0: Add and remove foreign pages
  2013-12-06  2:54   ` Mukesh Rathor
@ 2013-12-06 11:46     ` Jan Beulich
  2013-12-07  2:09       ` Mukesh Rathor
  0 siblings, 1 reply; 46+ messages in thread
From: Jan Beulich @ 2013-12-06 11:46 UTC (permalink / raw)
  To: Mukesh Rathor; +Cc: george.dunlap, xen-devel, keir.xen, tim

>>> On 06.12.13 at 03:54, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> @@ -693,11 +695,42 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>              return rc;
>          }
>  
> +        /*
> +         * If autotranslate guest, (eg pvh), the gfn could be mapped to a mfn
> +         * from foreign domain by the user space tool during domain creation.
> +         * We need to check for that, free it up from the p2m, and release
> +         * refcnt on it. In such a case, page would be NULL and the following
> +         * call would not have refcnt'd the page.
> +         * See also xenmem_add_foreign_to_p2m().
> +         */
>          page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
>          if ( page )
> +            mfn = page_to_mfn(page);
> +#ifdef CONFIG_X86

I take this to mean that the code is okay for ARM now. But such a
conditional here needs explanation in a code comment, or putting
into something that's generic (i.e. "else if ()") but currently happening
to be always false for ARM.

> +        else
>          {
> -            guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
> -            put_page(page);
> +            mfn = mfn_x(get_gfn_query(d, xrfp.gpfn, &p2mt));
> +            if ( p2m_is_foreign(p2mt) )
> +            {
> +                struct domain *foreign_dom;
> +
> +                foreign_dom = page_get_owner(mfn_to_page(mfn));
> +                ASSERT(is_pvh_domain(d));
> +                ASSERT(d != foreign_dom);
> +            }
> +        }
> +#endif
> +        if ( page || p2m_is_foreign(p2mt) )
> +        {
> +            guest_physmap_remove_page(d, xrfp.gpfn, mfn, 0);
> +            if ( page )
> +                put_page(page);
> +
> +            if ( p2m_is_foreign(p2mt) )
> +            {
> +                put_page(mfn_to_page(mfn));
> +                put_gfn(d, xrfp.gpfn);
> +            }

The code as it stands gives the impression that there could be two
put_page() invocations in a single run here. Based on the comment
above I assume this should never be the case though. That would
be nice to be documented via a suitable ASSERT(), or it could be
made more obvious by doing something like

        if ( page || p2m_is_foreign(p2mt) )
        {
            guest_physmap_remove_page(d, xrfp.gpfn, mfn, 0);
            if ( p2m_is_foreign(p2mt) )
            {
                page = mfn_to_page(mfn);
                put_gfn(d, xrfp.gpfn);
            }
            put_page(page);
        }

Jan

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6/7] pvh dom0: Add and remove foreign pages
  2013-12-06 11:46     ` Jan Beulich
@ 2013-12-07  2:09       ` Mukesh Rathor
  0 siblings, 0 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-07  2:09 UTC (permalink / raw)
  To: Jan Beulich; +Cc: george.dunlap, xen-devel, keir.xen, tim

On Fri, 06 Dec 2013 11:46:35 +0000
"Jan Beulich" <JBeulich@suse.com> wrote:

> >>> On 06.12.13 at 03:54, Mukesh Rathor <mukesh.rathor@oracle.com>
> >>> wrote:
> > @@ -693,11 +695,42 @@ long do_memory_op(unsigned long cmd,
> > XEN_GUEST_HANDLE_PARAM(void) arg) return rc;
> >          }
> >  
> > +        /*
> > +         * If autotranslate guest, (eg pvh), the gfn could be
> > mapped to a mfn
> > +         * from foreign domain by the user space tool during
> > domain creation.
> > +         * We need to check for that, free it up from the p2m, and
> > release
> > +         * refcnt on it. In such a case, page would be NULL and
> > the following
> > +         * call would not have refcnt'd the page.
> > +         * See also xenmem_add_foreign_to_p2m().
> > +         */
> >          page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
> >          if ( page )
> > +            mfn = page_to_mfn(page);
> > +#ifdef CONFIG_X86
> 
> I take this to mean that the code is okay for ARM now. But such a
> conditional here needs explanation in a code comment, or putting
> into something that's generic (i.e. "else if ()") but currently
> happening to be always false for ARM.
> 
> > +        else
> >          {
> > -            guest_physmap_remove_page(d, xrfp.gpfn,
> > page_to_mfn(page), 0);
> > -            put_page(page);
> > +            mfn = mfn_x(get_gfn_query(d, xrfp.gpfn, &p2mt));
> > +            if ( p2m_is_foreign(p2mt) )
> > +            {
> > +                struct domain *foreign_dom;
> > +
> > +                foreign_dom = page_get_owner(mfn_to_page(mfn));
> > +                ASSERT(is_pvh_domain(d));
> > +                ASSERT(d != foreign_dom);
> > +            }
> > +        }
> > +#endif
> > +        if ( page || p2m_is_foreign(p2mt) )
> > +        {
> > +            guest_physmap_remove_page(d, xrfp.gpfn, mfn, 0);
> > +            if ( page )
> > +                put_page(page);
> > +
> > +            if ( p2m_is_foreign(p2mt) )
> > +            {
> > +                put_page(mfn_to_page(mfn));
> > +                put_gfn(d, xrfp.gpfn);
> > +            }
> 
> The code as it stands gives the impression that there could be two
> put_page() invocations in a single run here. Based on the comment
> above I assume this should never be the case though. That would
> be nice to be documented via a suitable ASSERT(), or it could be
> made more obvious by doing something like
> 
>         if ( page || p2m_is_foreign(p2mt) )
>         {
>             guest_physmap_remove_page(d, xrfp.gpfn, mfn, 0);
>             if ( p2m_is_foreign(p2mt) )
>             {
>                 page = mfn_to_page(mfn);
>                 put_gfn(d, xrfp.gpfn);
>             }
>             put_page(page);
>         }

Good idea to just have one put_page. But I reworked the code to make
xenmem_rem_foreign_from_p2m like Ian C suggested. I think that may be
the cleanest solution. Please take a look.

thanks
Mukesh

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.1/7] pvh dom0: Add and remove foreign pages
  2013-12-06  2:38 ` [V6 PATCH 6/7] pvh dom0: Add and remove foreign pages Mukesh Rathor
  2013-12-06  2:54   ` Mukesh Rathor
@ 2013-12-07  2:34   ` Mukesh Rathor
  2013-12-07 16:06     ` Julien Grall
                       ` (3 more replies)
  2013-12-09  2:45   ` [V6 PATCH 6/7] " Julien Grall
  2013-12-11  0:27   ` [V6 PATCH 6.2/7] " Mukesh Rathor
  3 siblings, 4 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-07  2:34 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: Xen-devel, Ian Campbell, george.dunlap, tim, keir.xen, JBeulich

New version of the patch with xenmem_rem_foreign_from_p2m() created:

In this patch, a new function, xenmem_add_foreign_to_p2m(), is added
to map pages from a foreign guest into the current dom0 for domU creation.
Such pages are typed p2m_map_foreign. Another function
xenmem_rem_foreign_from_p2m() is added to remove such pages. Note, in
the remove path, we must release the refcount that was taken during
the map phase.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/mm.c         |  114 ++++++++++++++++++++++++++++++++++++++++++---
 xen/common/memory.c       |   12 ++++-
 xen/include/asm-arm/p2m.h |    9 +++-
 xen/include/asm-x86/p2m.h |    3 +
 4 files changed, 128 insertions(+), 10 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e3da479..78f4650 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2810,7 +2810,7 @@ static struct domain *get_pg_owner(domid_t domid)
         goto out;
     }
 
-    if ( unlikely(paging_mode_translate(curr)) )
+    if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
     {
         MEM_LOG("Cannot mix foreign mappings with translated domains");
         goto out;
@@ -4520,9 +4520,101 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
     return 0;
 }
 
+/*
+ * Add frames from foreign domain to target domain's physmap. Similar to
+ * XENMAPSPACE_gmfn but the frame is foreign being mapped into current,
+ * and is not removed from foreign domain.
+ * Usage: libxl on pvh dom0 creating a guest and doing privcmd_ioctl_mmap.
+ * Side Effect: the mfn for fgfn will be refcounted so it is not lost
+ *              while mapped here. The refcnt is released in do_memory_op()
+ *              via XENMEM_remove_from_physmap.
+ * Returns: 0 ==> success
+ */
+static int xenmem_add_foreign_to_p2m(struct domain *tdom, unsigned long fgfn,
+                                     unsigned long gpfn, struct domain *fdom)
+{
+    p2m_type_t p2mt, p2mt_prev;
+    int rc = 0;
+    unsigned long prev_mfn, mfn = 0;
+    struct page_info *page = NULL;
+
+    if ( tdom == fdom || !tdom || !fdom || !is_pvh_domain(tdom) )
+        return -EINVAL;
+
+    /* following will take a refcnt on the mfn */
+    page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
+    if ( !page || !p2m_is_valid(p2mt) )
+    {
+        if ( page )
+            put_page(page);
+        return -EINVAL;
+    }
+    mfn = page_to_mfn(page);
+
+    /* Remove previously mapped page if it is present. */
+    prev_mfn = mfn_x(get_gfn(tdom, gpfn, &p2mt_prev));
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(prev_mfn) )
+            /* Xen heap frames are simply unhooked from this phys slot */
+            guest_physmap_remove_page(tdom, gpfn, prev_mfn, 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(tdom, gpfn);
+    }
+    /*
+     * Create the new mapping. Can't use guest_physmap_add_page() because it
+     * will update the m2p table which will result in  mfn -> gpfn of dom0
+     * and not fgfn of domU.
+     */
+    if ( set_foreign_p2m_entry(tdom, gpfn, _mfn(mfn)) == 0 )
+    {
+        gdprintk(XENLOG_WARNING, "set_foreign_p2m_entry failed. "
+                 "gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
+                 gpfn, mfn, fgfn, tdom->domain_id, fdom->domain_id);
+        put_page(page);
+        rc = -EINVAL;
+    }
+
+    /*
+     * We must do this put_gfn after set_foreign_p2m_entry so another cpu
+     * doesn't populate the gpfn before us.
+     */
+    put_gfn(tdom, gpfn);
+
+    return rc;
+}
+
+/* Note, the refcnt released here is taken in xenmem_add_foreign_to_p2m */
+int xenmem_rem_foreign_from_p2m(struct domain *d, unsigned long gpfn)
+{
+    unsigned long mfn;
+    p2m_type_t p2mt;
+    struct domain *foreign_dom;
+
+    mfn = mfn_x(get_gfn_query(d, gpfn, &p2mt));
+    if ( !mfn_valid(mfn) )
+    {
+        gdprintk(XENLOG_WARNING, "Invalid mfn for gpfn:%lx domid:%d\n",
+                 gpfn, d->domain_id);
+        return -EINVAL;
+    }
+
+    foreign_dom = page_get_owner(mfn_to_page(mfn));
+    ASSERT(d != foreign_dom);
+    ASSERT(is_pvh_domain(d));
+
+    guest_physmap_remove_page(d, gpfn, mfn, 0);
+    put_page(mfn_to_page(mfn));
+    put_gfn(d, gpfn);
+
+    return 0;
+}
+
 static int xenmem_add_to_physmap_once(
     struct domain *d,
-    const struct xen_add_to_physmap *xatp)
+    const struct xen_add_to_physmap *xatp,
+    struct domain *fdom)
 {
     struct page_info *page = NULL;
     unsigned long gfn = 0; /* gcc ... */
@@ -4581,6 +4673,13 @@ static int xenmem_add_to_physmap_once(
             page = mfn_to_page(mfn);
             break;
         }
+
+        case XENMAPSPACE_gmfn_foreign:
+        {
+            rc = xenmem_add_foreign_to_p2m(d, xatp->idx, xatp->gpfn, fdom);
+            return rc;
+        }
+
         default:
             break;
     }
@@ -4646,7 +4745,7 @@ static int xenmem_add_to_physmap(struct domain *d,
         start_xatp = *xatp;
         while ( xatp->size > 0 )
         {
-            rc = xenmem_add_to_physmap_once(d, xatp);
+            rc = xenmem_add_to_physmap_once(d, xatp, NULL);
             if ( rc < 0 )
                 return rc;
 
@@ -4672,11 +4771,12 @@ static int xenmem_add_to_physmap(struct domain *d,
         return rc;
     }
 
-    return xenmem_add_to_physmap_once(d, xatp);
+    return xenmem_add_to_physmap_once(d, xatp, NULL);
 }
 
 static int xenmem_add_to_physmap_range(struct domain *d,
-                                       struct xen_add_to_physmap_range *xatpr)
+                                       struct xen_add_to_physmap_range *xatpr,
+                                       struct domain *fdom)
 {
     /* Process entries in reverse order to allow continuations */
     while ( xatpr->size > 0 )
@@ -4693,7 +4793,7 @@ static int xenmem_add_to_physmap_range(struct domain *d,
         xatp.space = xatpr->space;
         xatp.idx = idx;
         xatp.gpfn = gpfn;
-        rc = xenmem_add_to_physmap_once(d, &xatp);
+        rc = xenmem_add_to_physmap_once(d, &xatp, fdom);
 
         if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
             return -EFAULT;
@@ -4780,7 +4880,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         }
         rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
         if ( rc == 0 )
-            rc = xenmem_add_to_physmap_range(d, &xatpr);
+            rc = xenmem_add_to_physmap_range(d, &xatpr, fd);
 
         rcu_unlock_domain(d);
         if ( fd )
diff --git a/xen/common/memory.c b/xen/common/memory.c
index eb7b72b..80c660e 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -678,6 +678,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         struct xen_remove_from_physmap xrfp;
         struct page_info *page;
         struct domain *d;
+        p2m_type_t p2mt;
 
         if ( copy_from_guest(&xrfp, arg, 1) )
             return -EFAULT;
@@ -693,12 +694,21 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             return rc;
         }
 
-        page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
+        /*
+         * If autotranslate guest, (eg pvh), the gfn could be mapped to a mfn
+         * from foreign domain by the user space tool during domain creation.
+         * We need to check for that, free it up from the p2m, and release
+         * refcnt on it. In such a case, page would be NULL and the following
+         * call would not have refcnt'd the page.
+         */
+        page = get_page_from_gfn(d, xrfp.gpfn, &p2mt, P2M_ALLOC);
         if ( page )
         {
             guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
             put_page(page);
         }
+        else if ( p2m_is_foreign(p2mt) )
+            rc = xenmem_rem_foreign_from_p2m(d, xrfp.gpfn);
         else
             rc = -ENOENT;
 
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index c660820..0c554a5 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -83,8 +83,6 @@ static inline struct page_info *get_page_from_gfn(
     struct page_info *page;
     unsigned long mfn = gmfn_to_mfn(d, gfn);
 
-    ASSERT(t == NULL);
-
     if (!mfn_valid(mfn))
         return NULL;
     page = mfn_to_page(mfn);
@@ -110,6 +108,13 @@ static inline int get_page_and_type(struct page_info *page,
     return rc;
 }
 
+#define p2m_is_foreign(_t) (0 && (_t))
+static inline int xenmem_rem_foreign_from_p2m(struct domain *d, 
+                                              unsigned long gpfn)
+{
+    return -ENOSYS;
+}
+
 #endif /* _XEN_P2M_H */
 
 /*
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 6fc71a1..aacdffe 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -515,6 +515,9 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
 /* Set foreign mfn in the current guest's p2m table. */
 int set_foreign_p2m_entry(struct domain *domp, unsigned long gfn, mfn_t mfn);
 
+/* Remove foreign mapping from the guest's p2m table. */
+int xenmem_rem_foreign_from_p2m(struct domain *d, unsigned long gpfn);
+
 /* 
  * Populate-on-demand
  */
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.1/7] pvh dom0: Add and remove foreign pages
  2013-12-07  2:34   ` [V6 PATCH 6.1/7] " Mukesh Rathor
@ 2013-12-07 16:06     ` Julien Grall
  2013-12-09  9:50     ` Jan Beulich
                       ` (2 subsequent siblings)
  3 siblings, 0 replies; 46+ messages in thread
From: Julien Grall @ 2013-12-07 16:06 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: Xen-devel, Ian Campbell, george.dunlap, tim, keir.xen, JBeulich



On 12/07/2013 02:34 AM, Mukesh Rathor wrote:
> New version of the patch with xenmem_rem_foreign_from_p2m() created:
>
> In this patch, a new function, xenmem_add_foreign_to_p2m(), is added
> to map pages from foreign guest into current dom0 for domU creation.
> Such pages are typed p2m_map_foreign. Another function
> xenmem_rem_foreign_from_p2m() is added to remove such pages. Note, in
> the remove path, we must release the refcount that was taken during
> the map phase.
>
> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
> ---

[..]


> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index c660820..0c554a5 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -83,8 +83,6 @@ static inline struct page_info *get_page_from_gfn(
>       struct page_info *page;
>       unsigned long mfn = gmfn_to_mfn(d, gfn);
>
> -    ASSERT(t == NULL);
> -

I would set *t to INT_MAX, for now. This will ensure that t is 
"correctly" initialized.

-- 
Julien Grall

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6/7] pvh dom0: Add and remove foreign pages
  2013-12-06  2:38 ` [V6 PATCH 6/7] pvh dom0: Add and remove foreign pages Mukesh Rathor
  2013-12-06  2:54   ` Mukesh Rathor
  2013-12-07  2:34   ` [V6 PATCH 6.1/7] " Mukesh Rathor
@ 2013-12-09  2:45   ` Julien Grall
  2013-12-09  2:57     ` Julien Grall
  2013-12-10  2:17     ` Mukesh Rathor
  2013-12-11  0:27   ` [V6 PATCH 6.2/7] " Mukesh Rathor
  3 siblings, 2 replies; 46+ messages in thread
From: Julien Grall @ 2013-12-09  2:45 UTC (permalink / raw)
  To: Mukesh Rathor, Xen-devel
  Cc: george.dunlap, keir.xen, tim, Ian Campbell, JBeulich



On 12/06/2013 02:38 AM, Mukesh Rathor wrote:
> In this patch, a new function, xenmem_add_foreign_to_p2m(), is added
> to map pages from foreign guest into current dom0 for domU creation.
> Such pages are typed p2m_map_foreign. Also, support is added here to
> XENMEM_remove_from_physmap to remove such pages. Note, in the remove
> path, we must release the refcount that was taken during the map phase.
>
> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
> ---
>   xen/arch/x86/mm.c         |   88 +++++++++++++++++++++++++++++++++++++++++----
>   xen/common/memory.c       |   37 ++++++++++++++++++-
>   xen/include/asm-arm/p2m.h |    2 +
>   3 files changed, 118 insertions(+), 9 deletions(-)

This patch doesn't compile on ARM:
memory.c: In function 'do_memory_op':
memory.c:682:20: error: unused variable 'p2mt' [-Werror=unused-variable]
cc1: all warnings being treated as errors

For x86, when a domain is destroyed and there is still some foreign page 
mapped, you forget to decrease the refcount (via put_page). It will 
likely result to a zombie domain.
For instance a stubdomain with foreign map on a guest. If the stubdomain 
doesn't release the page, this guest will become a zombie.

-- 
Julien Grall

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6/7] pvh dom0: Add and remove foreign pages
  2013-12-09  2:45   ` [V6 PATCH 6/7] " Julien Grall
@ 2013-12-09  2:57     ` Julien Grall
  2013-12-10  2:17     ` Mukesh Rathor
  1 sibling, 0 replies; 46+ messages in thread
From: Julien Grall @ 2013-12-09  2:57 UTC (permalink / raw)
  To: Mukesh Rathor, Xen-devel
  Cc: george.dunlap, keir.xen, tim, Ian Campbell, JBeulich



On 12/09/2013 02:45 AM, Julien Grall wrote:
>
>
> On 12/06/2013 02:38 AM, Mukesh Rathor wrote:
>> In this patch, a new function, xenmem_add_foreign_to_p2m(), is added
>> to map pages from foreign guest into current dom0 for domU creation.
>> Such pages are typed p2m_map_foreign. Also, support is added here to
>> XENMEM_remove_from_physmap to remove such pages. Note, in the remove
>> path, we must release the refcount that was taken during the map phase.
>>
>> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
>> ---
>>   xen/arch/x86/mm.c         |   88
>> +++++++++++++++++++++++++++++++++++++++++----
>>   xen/common/memory.c       |   37 ++++++++++++++++++-
>>   xen/include/asm-arm/p2m.h |    2 +
>>   3 files changed, 118 insertions(+), 9 deletions(-)
>
> This patch doesn't compile on ARM:
> memory.c: In function 'do_memory_op':
> memory.c:682:20: error: unused variable 'p2mt' [-Werror=unused-variable]
> cc1: all warnings being treated as errors

Hmmm actually, I forgot to retrieve your V6.1. Forget this part.


-- 
Julien Grall

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.1/7] pvh dom0: Add and remove foreign pages
  2013-12-07  2:34   ` [V6 PATCH 6.1/7] " Mukesh Rathor
  2013-12-07 16:06     ` Julien Grall
@ 2013-12-09  9:50     ` Jan Beulich
  2013-12-10  1:30       ` Mukesh Rathor
  2013-12-09 10:31     ` Ian Campbell
  2013-12-09 12:11     ` Tim Deegan
  3 siblings, 1 reply; 46+ messages in thread
From: Jan Beulich @ 2013-12-09  9:50 UTC (permalink / raw)
  To: Mukesh Rathor; +Cc: george.dunlap, Xen-devel, keir.xen, tim, Ian Campbell

>>> On 07.12.13 at 03:34, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> New version of the patch with xenmem_rem_foreign_from_p2m() created:
> 
> In this patch, a new function, xenmem_add_foreign_to_p2m(), is added
> to map pages from foreign guest into current dom0 for domU creation.
> Such pages are typed p2m_map_foreign. Another function
> xenmem_rem_foreign_from_p2m() is added to remove such pages. Note, in
> the remove path, we must release the refcount that was taken during
> the map phase.

Wouldn't both of the new functions better go into arch/x86/mm/p2m.c
(as already reflected by the declaration of the one that's currently
not static being placed in p2m.h)? In any event, the p2m interaction
here needs Tim's blessing.

> +/* Note, the refcnt released here is taken in xenmem_add_foreign_to_p2m */
> +int xenmem_rem_foreign_from_p2m(struct domain *d, unsigned long gpfn)
> +{
> +    unsigned long mfn;
> +    p2m_type_t p2mt;
> +    struct domain *foreign_dom;
> +
> +    mfn = mfn_x(get_gfn_query(d, gpfn, &p2mt));
> +    if ( !mfn_valid(mfn) )
> +    {
> +        gdprintk(XENLOG_WARNING, "Invalid mfn for gpfn:%lx domid:%d\n",
> +                 gpfn, d->domain_id);
> +        return -EINVAL;
> +    }

ASSERT(p2m_is_foreign(p2mt)) ?

> +
> +    foreign_dom = page_get_owner(mfn_to_page(mfn));
> +    ASSERT(d != foreign_dom);
> +    ASSERT(is_pvh_domain(d));

Wouldn't this better be done first thing in the function?

> +
> +    guest_physmap_remove_page(d, gpfn, mfn, 0);
> +    put_page(mfn_to_page(mfn));
> +    put_gfn(d, gpfn);
> +
> +    return 0;
> +}

Jan

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.1/7] pvh dom0: Add and remove foreign pages
  2013-12-07  2:34   ` [V6 PATCH 6.1/7] " Mukesh Rathor
  2013-12-07 16:06     ` Julien Grall
  2013-12-09  9:50     ` Jan Beulich
@ 2013-12-09 10:31     ` Ian Campbell
  2013-12-09 13:46       ` Julien Grall
  2013-12-09 12:11     ` Tim Deegan
  3 siblings, 1 reply; 46+ messages in thread
From: Ian Campbell @ 2013-12-09 10:31 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: Xen-devel, george.dunlap, tim, Julien Grall, keir.xen, JBeulich

On Fri, 2013-12-06 at 18:34 -0800, Mukesh Rathor wrote:
> New version of the patch with xenmem_rem_foreign_from_p2m() created:
> 
> In this patch, a new function, xenmem_add_foreign_to_p2m(), is added
> to map pages from foreign guest into current dom0 for domU creation.
> Such pages are typed p2m_map_foreign. Another function
> xenmem_rem_foreign_from_p2m() is added to remove such pages. Note, in
> the remove path, we must release the refcount that was taken during
> the map phase.

Thanks, the common code portions are much cleaner with this approach and
the ARM stubs look fine for now.

I noticed that you enforce that the domain is foreign, and assert on
teardown, which I think is a good idea. I don't think we do this on ARM
right now -- Julien do you think we should do this? If yes can you
arrange to do it in your series?

Ian.

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 4/7] pvh dom0: Introduce p2m_map_foreign
  2013-12-06  2:38 ` [V6 PATCH 4/7] pvh dom0: Introduce p2m_map_foreign Mukesh Rathor
@ 2013-12-09 12:02   ` Tim Deegan
  0 siblings, 0 replies; 46+ messages in thread
From: Tim Deegan @ 2013-12-09 12:02 UTC (permalink / raw)
  To: Mukesh Rathor; +Cc: george.dunlap, Xen-devel, keir.xen, JBeulich

At 18:38 -0800 on 05 Dec (1386265121), Mukesh Rathor wrote:
> In this patch, a new type p2m_map_foreign is introduced for pages
> that the toolstack on PVH dom0 maps from foreign domains that it is creating
> or supporting during its run time.
> 
> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>

Acked-by: Tim Deegan <tim@xen.org>

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.1/7] pvh dom0: Add and remove foreign pages
  2013-12-07  2:34   ` [V6 PATCH 6.1/7] " Mukesh Rathor
                       ` (2 preceding siblings ...)
  2013-12-09 10:31     ` Ian Campbell
@ 2013-12-09 12:11     ` Tim Deegan
  2013-12-10  2:16       ` Mukesh Rathor
  3 siblings, 1 reply; 46+ messages in thread
From: Tim Deegan @ 2013-12-09 12:11 UTC (permalink / raw)
  To: Mukesh Rathor; +Cc: george.dunlap, Xen-devel, keir.xen, JBeulich, Ian Campbell

Hi,

At 18:34 -0800 on 06 Dec (1386351256), Mukesh Rathor wrote:
> In this patch, a new function, xenmem_add_foreign_to_p2m(), is added
> to map pages from foreign guest into current dom0 for domU creation.
> Such pages are typed p2m_map_foreign. Another function
> xenmem_rem_foreign_from_p2m() is added to remove such pages. Note, in
> the remove path, we must release the refcount that was taken during
> the map phase.
> 
> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
[...]
> +/*
> + * Add frames from foreign domain to target domain's physmap. Similar to
> + * XENMAPSPACE_gmfn but the frame is foreign being mapped into current,
> + * and is not removed from foreign domain.
> + * Usage: libxl on pvh dom0 creating a guest and doing privcmd_ioctl_mmap.
> + * Side Effect: the mfn for fgfn will be refcounted so it is not lost
> + *              while mapped here. The refcnt is released in do_memory_op()
> + *              via XENMEM_remove_from_physmap.

Is that comment out of date?  AFAICS the put_page() happens...

> +/* Note, the refcnt released here is taken in xenmem_add_foreign_to_p2m */
> +int xenmem_rem_foreign_from_p2m(struct domain *d, unsigned long gpfn)
> +{
> +    unsigned long mfn;
> +    p2m_type_t p2mt;
> +    struct domain *foreign_dom;
> +
> +    mfn = mfn_x(get_gfn_query(d, gpfn, &p2mt));
> +    if ( !mfn_valid(mfn) )
> +    {
> +        gdprintk(XENLOG_WARNING, "Invalid mfn for gpfn:%lx domid:%d\n",
> +                 gpfn, d->domain_id);
> +        return -EINVAL;
> +    }
> +
> +    foreign_dom = page_get_owner(mfn_to_page(mfn));
> +    ASSERT(d != foreign_dom);
> +    ASSERT(is_pvh_domain(d));
> +
> +    guest_physmap_remove_page(d, gpfn, mfn, 0);
> +    put_page(mfn_to_page(mfn));

...here, and doesn't look safe.  This put_page() is to balance the
get_page() in xenmem_add_foreign_to_p2m() but (a) you haven't checked
here that the entry you're removing is actually a foreign one and (b)
you haven't updated any of the other paths that might clear a p2m
entry that contained a foreign mapping.

I think the refcounting will have to be done at the bottom of the
arch-specific implementation, where the actual p2m entry gets set or
cleared.

Tim.

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.1/7] pvh dom0: Add and remove foreign pages
  2013-12-09 10:31     ` Ian Campbell
@ 2013-12-09 13:46       ` Julien Grall
  0 siblings, 0 replies; 46+ messages in thread
From: Julien Grall @ 2013-12-09 13:46 UTC (permalink / raw)
  To: Ian Campbell, Mukesh Rathor
  Cc: Xen-devel, george.dunlap, tim, Julien Grall, keir.xen, JBeulich



On 12/09/2013 10:31 AM, Ian Campbell wrote:
> On Fri, 2013-12-06 at 18:34 -0800, Mukesh Rathor wrote:
>> New version of the patch with xenmem_rem_foreign_from_p2m() created:
>>
>> In this patch, a new function, xenmem_add_foreign_to_p2m(), is added
>> to map pages from foreign guest into current dom0 for domU creation.
>> Such pages are typed p2m_map_foreign. Another function
>> xenmem_rem_foreign_from_p2m() is added to remove such pages. Note, in
>> the remove path, we must release the refcount that was taken during
>> the map phase.
>
> Thanks, the common code portions are much cleaner with this approach and
> the ARM stubs look fine for now.
>
> I noticed that you enforce that the domain is foreign, and assert on
> teardown, which I think is a good idea. I don't think we do this on ARM
> right now -- Julien do you think we should do this? If yes can you
> arrange to do it in your series?

Actually I have added an ASSERT in the remove helper but forgot to check 
in xenmem_add_to_physmap_one.

I will do it in the next version of the patch series.

-- 
Julien Grall

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.1/7] pvh dom0: Add and remove foreign pages
  2013-12-09  9:50     ` Jan Beulich
@ 2013-12-10  1:30       ` Mukesh Rathor
  0 siblings, 0 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-10  1:30 UTC (permalink / raw)
  To: Jan Beulich; +Cc: george.dunlap, Xen-devel, keir.xen, tim, Ian Campbell

On Mon, 09 Dec 2013 09:50:28 +0000
"Jan Beulich" <JBeulich@suse.com> wrote:

> >>> On 07.12.13 at 03:34, Mukesh Rathor <mukesh.rathor@oracle.com>
> >>> wrote:
> > New version of the patch with xenmem_rem_foreign_from_p2m() created:
> > 
> > In this patch, a new function, xenmem_add_foreign_to_p2m(), is added
> > to map pages from foreign guest into current dom0 for domU creation.
> > Such pages are typed p2m_map_foreign. Another function
> > xenmem_rem_foreign_from_p2m() is added to remove such pages. Note,
> > in the remove path, we must release the refcount that was taken
> > during the map phase.
> 
> Wouldn't both of the new functions better go into arch/x86/mm/p2m.c
> (as already reflected by the declaration of the one that's currently
> not static being placed in p2m.h)? In any event, the p2m interaction
> here needs Tim's blessing.

ok, i can move them to p2m.c, perhaps rename to p2m_add_foreign...

> > +/* Note, the refcnt released here is taken in
> > xenmem_add_foreign_to_p2m */ +int
> > xenmem_rem_foreign_from_p2m(struct domain *d, unsigned long gpfn) +{
> > +    unsigned long mfn;
> > +    p2m_type_t p2mt;
> > +    struct domain *foreign_dom;
> > +
> > +    mfn = mfn_x(get_gfn_query(d, gpfn, &p2mt));
> > +    if ( !mfn_valid(mfn) )
> > +    {
> > +        gdprintk(XENLOG_WARNING, "Invalid mfn for gpfn:%lx
> > domid:%d\n",
> > +                 gpfn, d->domain_id);
> > +        return -EINVAL;
> > +    }
> 
> ASSERT(p2m_is_foreign(p2mt)) ?

Called for foreign only, but good idea to check for it in case of
other callers in future.

> > +
> > +    foreign_dom = page_get_owner(mfn_to_page(mfn));
> > +    ASSERT(d != foreign_dom);
> > +    ASSERT(is_pvh_domain(d));
> 
> Wouldn't this better be done first thing in the function?

ok.

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.1/7] pvh dom0: Add and remove foreign pages
  2013-12-09 12:11     ` Tim Deegan
@ 2013-12-10  2:16       ` Mukesh Rathor
  0 siblings, 0 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-10  2:16 UTC (permalink / raw)
  To: Tim Deegan; +Cc: george.dunlap, Xen-devel, keir.xen, JBeulich, Ian Campbell

On Mon, 9 Dec 2013 13:11:49 +0100
Tim Deegan <tim@xen.org> wrote:

> Hi,
> 
> At 18:34 -0800 on 06 Dec (1386351256), Mukesh Rathor wrote:
> > In this patch, a new function, xenmem_add_foreign_to_p2m(), is added
> > to map pages from foreign guest into current dom0 for domU creation.
> > Such pages are typed p2m_map_foreign. Another function
> > xenmem_rem_foreign_from_p2m() is added to remove such pages. Note,
> > in the remove path, we must release the refcount that was taken
> > during the map phase.
> > 
> > Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
> [...]
> > +/*
> > + * Add frames from foreign domain to target domain's physmap.
> > Similar to
> > + * XENMAPSPACE_gmfn but the frame is foreign being mapped into
> > current,
> > + * and is not removed from foreign domain.
> > + * Usage: libxl on pvh dom0 creating a guest and doing
> > privcmd_ioctl_mmap.
> > + * Side Effect: the mfn for fgfn will be refcounted so it is not
> > lost
> > + *              while mapped here. The refcnt is released in
> > do_memory_op()
> > + *              via XENMEM_remove_from_physmap.
> 
> Is that comment out of date?  AFAICS the put_page() happens...

yup.

> > +/* Note, the refcnt released here is taken in
> > xenmem_add_foreign_to_p2m */ +int
> > xenmem_rem_foreign_from_p2m(struct domain *d, unsigned long gpfn) +{
> > +    unsigned long mfn;
> > +    p2m_type_t p2mt;
> > +    struct domain *foreign_dom;
> > +
> > +    mfn = mfn_x(get_gfn_query(d, gpfn, &p2mt));
> > +    if ( !mfn_valid(mfn) )
> > +    {
> > +        gdprintk(XENLOG_WARNING, "Invalid mfn for gpfn:%lx
> > domid:%d\n",
> > +                 gpfn, d->domain_id);
> > +        return -EINVAL;
> > +    }
> > +
> > +    foreign_dom = page_get_owner(mfn_to_page(mfn));
> > +    ASSERT(d != foreign_dom);
> > +    ASSERT(is_pvh_domain(d));
> > +
> > +    guest_physmap_remove_page(d, gpfn, mfn, 0);
> > +    put_page(mfn_to_page(mfn));
> 
> ...here, and doesn't look safe.  This put_page() is to balance the
> get_page() in xenmem_add_foreign_to_p2m() but (a) you haven't checked
> here that the entry you're removing is actually a foreign one and (b)
> you haven't updated any of the other paths that might clear a p2m
> entry that contained a foreign mapping.

(a)The function is only called for foreign currently, but good idea
to add a check now that this code is in a public function.

(b) right, i missed that. trying to figure places where i'd need to do
that. Looks like i could just do that in p2m_remove_page.

> I think the refcounting will have to be done at the bottom of the
> arch-specific implementation, where the actual p2m entry gets set or
> cleared.

Hmm... in the add path, need to get refcnt before removing previous
mapping, so i do get_page*, then remove prev mapping, then set p2m.

In the remove path, perhaps the put_page() could be moved to p2m_remove_page
thereby benefitting (b) above, but we miss the symmetry with add 
path. So either way... lmk.

thanks
Mukesh

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6/7] pvh dom0: Add and remove foreign pages
  2013-12-09  2:45   ` [V6 PATCH 6/7] " Julien Grall
  2013-12-09  2:57     ` Julien Grall
@ 2013-12-10  2:17     ` Mukesh Rathor
  1 sibling, 0 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-10  2:17 UTC (permalink / raw)
  To: Julien Grall
  Cc: Xen-devel, Ian Campbell, george.dunlap, tim, keir.xen, JBeulich

On Mon, 09 Dec 2013 02:45:19 +0000
Julien Grall <julien.grall@linaro.org> wrote:

> 
> 
> On 12/06/2013 02:38 AM, Mukesh Rathor wrote:
> > In this patch, a new function, xenmem_add_foreign_to_p2m(), is added
> > to map pages from foreign guest into current dom0 for domU creation.
> > Such pages are typed p2m_map_foreign. Also, support is added here to
> > XENMEM_remove_from_physmap to remove such pages. Note, in the remove
> > path, we must release the refcount that was taken during the map
> > phase.
> >
> > Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
> > ---
> >   xen/arch/x86/mm.c         |   88
> > +++++++++++++++++++++++++++++++++++++++++----
> > xen/common/memory.c       |   37 ++++++++++++++++++-
> > xen/include/asm-arm/p2m.h |    2 + 3 files changed, 118
> > insertions(+), 9 deletions(-)
> 
> This patch doesn't compile on ARM:
> memory.c: In function 'do_memory_op':
> memory.c:682:20: error: unused variable
> 'p2mt' [-Werror=unused-variable] cc1: all warnings being treated as
> errors
> 
> For x86, when a domain is destroyed and there is still some foreign
> page mapped, you forget to decrease the refcount (via put_page). It
> will likely result to a zombie domain.

right, i totally forgot. i think i can do that in p2m_remove_page.

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-06  2:38 ` [V6 PATCH 6/7] pvh dom0: Add and remove foreign pages Mukesh Rathor
                     ` (2 preceding siblings ...)
  2013-12-09  2:45   ` [V6 PATCH 6/7] " Julien Grall
@ 2013-12-11  0:27   ` Mukesh Rathor
  2013-12-11  0:44     ` Mukesh Rathor
  3 siblings, 1 reply; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-11  0:27 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: Xen-devel, Ian Campbell, george.dunlap, Julien Grall, tim,
	keir.xen, JBeulich

New version, 6.2. In this version, I've moved the functions to 
p2m.c from mm.c, and renamed them appropriately. Also, I've moved the
put_page to p2m_remove_page(). As a result, if the domain is destroyed
the foreign pages will be appropriately released and not left hanging.
Hoping this does it.. :)...

thanks,
Mukesh

---------------

In this patch, a new function, p2m_add_foreign(), is added
to map pages from foreign guest into current dom0 for domU creation.
Such pages are typed p2m_map_foreign. Another function
p2m_remove_foreign() is added to remove such pages. Note, in
the remove path, we must release the refcount that was taken during
the map phase. This is done in p2m_remove_page, which also addresses
releasing of refcnt when the domain is destroyed.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/mm.c         |   23 +++++++---
 xen/arch/x86/mm/p2m.c     |  101 +++++++++++++++++++++++++++++++++++++++++++++
 xen/common/memory.c       |   12 +++++-
 xen/include/asm-arm/p2m.h |    9 ++++-
 xen/include/asm-x86/p2m.h |    7 +++
 5 files changed, 143 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e3da479..6c2edc4 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2810,7 +2810,7 @@ static struct domain *get_pg_owner(domid_t domid)
         goto out;
     }
 
-    if ( unlikely(paging_mode_translate(curr)) )
+    if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
     {
         MEM_LOG("Cannot mix foreign mappings with translated domains");
         goto out;
@@ -4522,7 +4522,8 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
 
 static int xenmem_add_to_physmap_once(
     struct domain *d,
-    const struct xen_add_to_physmap *xatp)
+    const struct xen_add_to_physmap *xatp,
+    struct domain *fdom)
 {
     struct page_info *page = NULL;
     unsigned long gfn = 0; /* gcc ... */
@@ -4581,6 +4582,13 @@ static int xenmem_add_to_physmap_once(
             page = mfn_to_page(mfn);
             break;
         }
+
+        case XENMAPSPACE_gmfn_foreign:
+        {
+            rc = p2m_add_foreign(d, xatp->idx, xatp->gpfn, fdom);
+            return rc;
+        }
+
         default:
             break;
     }
@@ -4646,7 +4654,7 @@ static int xenmem_add_to_physmap(struct domain *d,
         start_xatp = *xatp;
         while ( xatp->size > 0 )
         {
-            rc = xenmem_add_to_physmap_once(d, xatp);
+            rc = xenmem_add_to_physmap_once(d, xatp, NULL);
             if ( rc < 0 )
                 return rc;
 
@@ -4672,11 +4680,12 @@ static int xenmem_add_to_physmap(struct domain *d,
         return rc;
     }
 
-    return xenmem_add_to_physmap_once(d, xatp);
+    return xenmem_add_to_physmap_once(d, xatp, NULL);
 }
 
 static int xenmem_add_to_physmap_range(struct domain *d,
-                                       struct xen_add_to_physmap_range *xatpr)
+                                       struct xen_add_to_physmap_range *xatpr,
+                                       struct domain *fdom)
 {
     /* Process entries in reverse order to allow continuations */
     while ( xatpr->size > 0 )
@@ -4693,7 +4702,7 @@ static int xenmem_add_to_physmap_range(struct domain *d,
         xatp.space = xatpr->space;
         xatp.idx = idx;
         xatp.gpfn = gpfn;
-        rc = xenmem_add_to_physmap_once(d, &xatp);
+        rc = xenmem_add_to_physmap_once(d, &xatp, fdom);
 
         if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
             return -EFAULT;
@@ -4780,7 +4789,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         }
         rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
         if ( rc == 0 )
-            rc = xenmem_add_to_physmap_range(d, &xatpr);
+            rc = xenmem_add_to_physmap_range(d, &xatpr, fd);
 
         rcu_unlock_domain(d);
         if ( fd )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 0659ef1..fe8c2f6 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -527,6 +527,10 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
             mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL);
             if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
                 set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
+
+            /* foreign pages are always refcnt'd in the add path. */
+            if ( p2m_is_foreign(t) )
+                put_page(mfn_to_page(mfn_return));
             ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
         }
     }
@@ -1741,6 +1745,103 @@ out_p2m_audit:
 #endif /* P2M_AUDIT */
 
 /*
+ * Add frames from foreign domain to target domain's physmap. Similar to
+ * XENMAPSPACE_gmfn but the frame is foreign being mapped into current,
+ * and is not removed from foreign domain.
+ * Usage: libxl on pvh dom0 creating a guest and doing privcmd_ioctl_mmap.
+ * Side Effect: the mfn for fgfn will be refcounted so it is not lost
+ *              while mapped here. The refcnt is released in p2m_remove_page
+ *              via p2m_remove_foreign.
+ * Returns: 0 ==> success
+ */
+int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
+                    unsigned long gpfn, struct domain *fdom)
+{
+    p2m_type_t p2mt, p2mt_prev;
+    mfn_t prev_mfn, mfn;
+    struct page_info *page;
+    int rc = 0;
+
+    if ( tdom == fdom || !tdom || !fdom || !is_pvh_domain(tdom) )
+        return -EINVAL;
+
+    /* following will take a refcnt on the mfn */
+    page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
+    if ( !page || !p2m_is_valid(p2mt) )
+    {
+        if ( page )
+            put_page(page);
+        return -EINVAL;
+    }
+    mfn = page_to_mfn(page);
+
+    /* Remove previously mapped page if it is present. */
+    prev_mfn = get_gfn(tdom, gpfn, &p2mt_prev);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(mfn_x(prev_mfn)) )
+            /* Xen heap frames are simply unhooked from this phys slot */
+            guest_physmap_remove_page(tdom, gpfn, mfn_x(prev_mfn), 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(tdom, gpfn);
+    }
+    /*
+     * Create the new mapping. Can't use guest_physmap_add_page() because it
+     * will update the m2p table which will result in  mfn -> gpfn of dom0
+     * and not fgfn of domU.
+     */
+    if ( set_foreign_p2m_entry(tdom, gpfn, mfn) == 0 )
+    {
+        gdprintk(XENLOG_WARNING, "set_foreign_p2m_entry failed. "
+                 "gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
+                 gpfn, mfn_x(mfn), fgfn, tdom->domain_id, fdom->domain_id);
+        put_page(page);
+        rc = -EINVAL;
+    }
+
+    /*
+     * We must do this put_gfn after set_foreign_p2m_entry so another cpu
+     * doesn't populate the gpfn before us.
+     */
+    put_gfn(tdom, gpfn);
+
+    return rc;
+}
+
+/* Note, the refcnt taken in p2m_add_foreign is released in p2m_remove_page */
+int p2m_remove_foreign(struct domain *d, unsigned long gpfn)
+{
+    mfn_t mfn;
+    p2m_type_t p2mt;
+    struct domain *foreign_dom;
+
+    ASSERT(is_pvh_domain(d));
+
+    mfn = get_gfn_query(d, gpfn, &p2mt);
+    if ( unlikely(!p2m_is_foreign(p2mt)) )
+    {
+        put_gfn(d, gpfn);
+        gdprintk(XENLOG_WARNING, "Invalid type for gpfn:%lx domid:%d t:%d\n",
+                 gpfn, d->domain_id, p2mt);
+        return -EINVAL;
+    }
+    if ( unlikely(!mfn_valid(mfn)) )
+    {
+        put_gfn(d, gpfn);
+        gdprintk(XENLOG_WARNING, "Invalid mfn for gpfn:%lx domid:%d\n",
+                 gpfn, d->domain_id);
+        return -EINVAL;
+    }
+
+    foreign_dom = page_get_owner(mfn_to_page(mfn));
+    ASSERT(d != foreign_dom);
+
+    guest_physmap_remove_page(d, gpfn, mfn_x(mfn), 0);
+    put_gfn(d, gpfn);
+    return 0;
+}
+/*
  * Local variables:
  * mode: C
  * c-file-style: "BSD"
diff --git a/xen/common/memory.c b/xen/common/memory.c
index eb7b72b..53f67c1 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -678,6 +678,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         struct xen_remove_from_physmap xrfp;
         struct page_info *page;
         struct domain *d;
+        p2m_type_t p2mt = INT_MAX;
 
         if ( copy_from_guest(&xrfp, arg, 1) )
             return -EFAULT;
@@ -693,12 +694,21 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             return rc;
         }
 
-        page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
+        /*
+         * If autotranslate guest, (eg pvh), the gfn could be mapped to a mfn
+         * from foreign domain by the user space tool during domain creation.
+         * We need to check for that, free it up from the p2m, and release
+         * refcnt on it. In such a case, page would be NULL and the following
+         * call would not have refcnt'd the page.
+         */
+        page = get_page_from_gfn(d, xrfp.gpfn, &p2mt, P2M_ALLOC);
         if ( page )
         {
             guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
             put_page(page);
         }
+        else if ( p2m_is_foreign(p2mt) )
+            rc = p2m_remove_foreign(d, xrfp.gpfn);
         else
             rc = -ENOENT;
 
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index c660820..d2fc85d 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -83,7 +83,7 @@ static inline struct page_info *get_page_from_gfn(
     struct page_info *page;
     unsigned long mfn = gmfn_to_mfn(d, gfn);
 
-    ASSERT(t == NULL);
+    ASSERT(t == INT_MAX);
 
     if (!mfn_valid(mfn))
         return NULL;
@@ -110,6 +110,13 @@ static inline int get_page_and_type(struct page_info *page,
     return rc;
 }
 
+#define p2m_is_foreign(_t) (0 && (_t))
+static inline int xenmem_rem_foreign_from_p2m(struct domain *d,
+                                              unsigned long gpfn)
+{
+    return -ENOSYS;
+}
+
 #endif /* _XEN_P2M_H */
 
 /*
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 6fc71a1..6371705 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -515,6 +515,13 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
 /* Set foreign mfn in the current guest's p2m table. */
 int set_foreign_p2m_entry(struct domain *domp, unsigned long gfn, mfn_t mfn);
 
+/* Add foreign mapping to the guest's p2m table. */
+int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
+                    unsigned long gpfn, struct domain *fdom);
+
+/* Remove foreign mapping from the guest's p2m table. */
+int p2m_remove_foreign(struct domain *d, unsigned long gpfn);
+
 /* 
  * Populate-on-demand
  */
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-11  0:27   ` [V6 PATCH 6.2/7] " Mukesh Rathor
@ 2013-12-11  0:44     ` Mukesh Rathor
  2013-12-11  1:35       ` Julien Grall
  0 siblings, 1 reply; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-11  0:44 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: Xen-devel, Ian Campbell, george.dunlap, Julien Grall, tim,
	keir.xen, JBeulich

Grrrr... sent too soon before compiling on arm... here's version
with arm compile fixed. thanks.

-------------

In this patch, a new function, p2m_add_foreign(), is added
to map pages from foreign guest into current dom0 for domU creation.
Such pages are typed p2m_map_foreign. Another function
p2m_remove_foreign() is added to remove such pages. Note, in
the remove path, we must release the refcount that was taken during
the map phase. This is done in p2m_remove_page, which also addresses
releasing of refcnt when the domain is destroyed.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/mm.c         |   23 +++++++---
 xen/arch/x86/mm/p2m.c     |  101 +++++++++++++++++++++++++++++++++++++++++++++
 xen/common/memory.c       |   12 +++++-
 xen/include/asm-arm/p2m.h |    8 +++-
 xen/include/asm-x86/p2m.h |    7 +++
 5 files changed, 142 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e3da479..6c2edc4 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2810,7 +2810,7 @@ static struct domain *get_pg_owner(domid_t domid)
         goto out;
     }
 
-    if ( unlikely(paging_mode_translate(curr)) )
+    if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
     {
         MEM_LOG("Cannot mix foreign mappings with translated domains");
         goto out;
@@ -4522,7 +4522,8 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
 
 static int xenmem_add_to_physmap_once(
     struct domain *d,
-    const struct xen_add_to_physmap *xatp)
+    const struct xen_add_to_physmap *xatp,
+    struct domain *fdom)
 {
     struct page_info *page = NULL;
     unsigned long gfn = 0; /* gcc ... */
@@ -4581,6 +4582,13 @@ static int xenmem_add_to_physmap_once(
             page = mfn_to_page(mfn);
             break;
         }
+
+        case XENMAPSPACE_gmfn_foreign:
+        {
+            rc = p2m_add_foreign(d, xatp->idx, xatp->gpfn, fdom);
+            return rc;
+        }
+
         default:
             break;
     }
@@ -4646,7 +4654,7 @@ static int xenmem_add_to_physmap(struct domain *d,
         start_xatp = *xatp;
         while ( xatp->size > 0 )
         {
-            rc = xenmem_add_to_physmap_once(d, xatp);
+            rc = xenmem_add_to_physmap_once(d, xatp, NULL);
             if ( rc < 0 )
                 return rc;
 
@@ -4672,11 +4680,12 @@ static int xenmem_add_to_physmap(struct domain *d,
         return rc;
     }
 
-    return xenmem_add_to_physmap_once(d, xatp);
+    return xenmem_add_to_physmap_once(d, xatp, NULL);
 }
 
 static int xenmem_add_to_physmap_range(struct domain *d,
-                                       struct xen_add_to_physmap_range *xatpr)
+                                       struct xen_add_to_physmap_range *xatpr,
+                                       struct domain *fdom)
 {
     /* Process entries in reverse order to allow continuations */
     while ( xatpr->size > 0 )
@@ -4693,7 +4702,7 @@ static int xenmem_add_to_physmap_range(struct domain *d,
         xatp.space = xatpr->space;
         xatp.idx = idx;
         xatp.gpfn = gpfn;
-        rc = xenmem_add_to_physmap_once(d, &xatp);
+        rc = xenmem_add_to_physmap_once(d, &xatp, fdom);
 
         if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
             return -EFAULT;
@@ -4780,7 +4789,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         }
         rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
         if ( rc == 0 )
-            rc = xenmem_add_to_physmap_range(d, &xatpr);
+            rc = xenmem_add_to_physmap_range(d, &xatpr, fd);
 
         rcu_unlock_domain(d);
         if ( fd )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 0659ef1..fe8c2f6 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -527,6 +527,10 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
             mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL);
             if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
                 set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
+
+            /* foreign pages are always refcnt'd in the add path. */
+            if ( p2m_is_foreign(t) )
+                put_page(mfn_to_page(mfn_return));
             ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
         }
     }
@@ -1741,6 +1745,103 @@ out_p2m_audit:
 #endif /* P2M_AUDIT */
 
 /*
+ * Add frames from foreign domain to target domain's physmap. Similar to
+ * XENMAPSPACE_gmfn but the frame is foreign being mapped into current,
+ * and is not removed from foreign domain.
+ * Usage: libxl on pvh dom0 creating a guest and doing privcmd_ioctl_mmap.
+ * Side Effect: the mfn for fgfn will be refcounted so it is not lost
+ *              while mapped here. The refcnt is released in p2m_remove_page
+ *              via p2m_remove_foreign.
+ * Returns: 0 ==> success
+ */
+int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
+                    unsigned long gpfn, struct domain *fdom)
+{
+    p2m_type_t p2mt, p2mt_prev;
+    mfn_t prev_mfn, mfn;
+    struct page_info *page;
+    int rc = 0;
+
+    if ( tdom == fdom || !tdom || !fdom || !is_pvh_domain(tdom) )
+        return -EINVAL;
+
+    /* following will take a refcnt on the mfn */
+    page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
+    if ( !page || !p2m_is_valid(p2mt) )
+    {
+        if ( page )
+            put_page(page);
+        return -EINVAL;
+    }
+    mfn = page_to_mfn(page);
+
+    /* Remove previously mapped page if it is present. */
+    prev_mfn = get_gfn(tdom, gpfn, &p2mt_prev);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(mfn_x(prev_mfn)) )
+            /* Xen heap frames are simply unhooked from this phys slot */
+            guest_physmap_remove_page(tdom, gpfn, mfn_x(prev_mfn), 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(tdom, gpfn);
+    }
+    /*
+     * Create the new mapping. Can't use guest_physmap_add_page() because it
+     * will update the m2p table which will result in  mfn -> gpfn of dom0
+     * and not fgfn of domU.
+     */
+    if ( set_foreign_p2m_entry(tdom, gpfn, mfn) == 0 )
+    {
+        gdprintk(XENLOG_WARNING, "set_foreign_p2m_entry failed. "
+                 "gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
+                 gpfn, mfn_x(mfn), fgfn, tdom->domain_id, fdom->domain_id);
+        put_page(page);
+        rc = -EINVAL;
+    }
+
+    /*
+     * We must do this put_gfn after set_foreign_p2m_entry so another cpu
+     * doesn't populate the gpfn before us.
+     */
+    put_gfn(tdom, gpfn);
+
+    return rc;
+}
+
+/* Note, the refcnt taken in p2m_add_foreign is released in p2m_remove_page */
+int p2m_remove_foreign(struct domain *d, unsigned long gpfn)
+{
+    mfn_t mfn;
+    p2m_type_t p2mt;
+    struct domain *foreign_dom;
+
+    ASSERT(is_pvh_domain(d));
+
+    mfn = get_gfn_query(d, gpfn, &p2mt);
+    if ( unlikely(!p2m_is_foreign(p2mt)) )
+    {
+        put_gfn(d, gpfn);
+        gdprintk(XENLOG_WARNING, "Invalid type for gpfn:%lx domid:%d t:%d\n",
+                 gpfn, d->domain_id, p2mt);
+        return -EINVAL;
+    }
+    if ( unlikely(!mfn_valid(mfn)) )
+    {
+        put_gfn(d, gpfn);
+        gdprintk(XENLOG_WARNING, "Invalid mfn for gpfn:%lx domid:%d\n",
+                 gpfn, d->domain_id);
+        return -EINVAL;
+    }
+
+    foreign_dom = page_get_owner(mfn_to_page(mfn));
+    ASSERT(d != foreign_dom);
+
+    guest_physmap_remove_page(d, gpfn, mfn_x(mfn), 0);
+    put_gfn(d, gpfn);
+    return 0;
+}
+/*
  * Local variables:
  * mode: C
  * c-file-style: "BSD"
diff --git a/xen/common/memory.c b/xen/common/memory.c
index eb7b72b..53f67c1 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -678,6 +678,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         struct xen_remove_from_physmap xrfp;
         struct page_info *page;
         struct domain *d;
+        p2m_type_t p2mt = INT_MAX;
 
         if ( copy_from_guest(&xrfp, arg, 1) )
             return -EFAULT;
@@ -693,12 +694,21 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             return rc;
         }
 
-        page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
+        /*
+         * If autotranslate guest, (eg pvh), the gfn could be mapped to a mfn
+         * from foreign domain by the user space tool during domain creation.
+         * We need to check for that, free it up from the p2m, and release
+         * refcnt on it. In such a case, page would be NULL and the following
+         * call would not have refcnt'd the page.
+         */
+        page = get_page_from_gfn(d, xrfp.gpfn, &p2mt, P2M_ALLOC);
         if ( page )
         {
             guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
             put_page(page);
         }
+        else if ( p2m_is_foreign(p2mt) )
+            rc = p2m_remove_foreign(d, xrfp.gpfn);
         else
             rc = -ENOENT;
 
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index c660820..03d34e9 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -83,7 +83,7 @@ static inline struct page_info *get_page_from_gfn(
     struct page_info *page;
     unsigned long mfn = gmfn_to_mfn(d, gfn);
 
-    ASSERT(t == NULL);
+    ASSERT(*t == INT_MAX);
 
     if (!mfn_valid(mfn))
         return NULL;
@@ -110,6 +110,12 @@ static inline int get_page_and_type(struct page_info *page,
     return rc;
 }
 
+#define p2m_is_foreign(_t) (0 && (_t))
+static inline int p2m_remove_foreign(struct domain *d, unsigned long gpfn)
+{
+    return -ENOSYS;
+}
+
 #endif /* _XEN_P2M_H */
 
 /*
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 6fc71a1..6371705 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -515,6 +515,13 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
 /* Set foreign mfn in the current guest's p2m table. */
 int set_foreign_p2m_entry(struct domain *domp, unsigned long gfn, mfn_t mfn);
 
+/* Add foreign mapping to the guest's p2m table. */
+int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
+                    unsigned long gpfn, struct domain *fdom);
+
+/* Remove foreign mapping from the guest's p2m table. */
+int p2m_remove_foreign(struct domain *d, unsigned long gpfn);
+
 /* 
  * Populate-on-demand
  */
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-11  0:44     ` Mukesh Rathor
@ 2013-12-11  1:35       ` Julien Grall
  2013-12-11  1:47         ` Mukesh Rathor
  0 siblings, 1 reply; 46+ messages in thread
From: Julien Grall @ 2013-12-11  1:35 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: Xen-devel, Ian Campbell, george.dunlap, tim, keir.xen, JBeulich



On 12/11/2013 12:44 AM, Mukesh Rathor wrote:
> Grrrr... sent too soon before compiling on arm... here's version
> with arm compile fixed. thanks.
>
> -------------
>
> In this patch, a new function, p2m_add_foreign(), is added
> to map pages from foreign guest into current dom0 for domU creation.
> Such pages are typed p2m_map_foreign. Another function
> p2m_remove_foreign() is added to remove such pages. Note, in
> the remove path, we must release the refcount that was taken during
> the map phase. This is done in p2m_remove_page, which also addresses
> releasing of refcnt when the domain is destroyed.
>
> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
> ---

[..]

>
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index c660820..03d34e9 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -83,7 +83,7 @@ static inline struct page_info *get_page_from_gfn(
>       struct page_info *page;
>       unsigned long mfn = gmfn_to_mfn(d, gfn);
>
> -    ASSERT(t == NULL);
> +    ASSERT(*t == INT_MAX);

There are various places where get_page_from_gfn is called with t == NULL. With 
this solution it will segfault every time.

I would do something like that:
   if (*t)
     t = INT_MAX;

-- 
Julien Grall

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-11  1:35       ` Julien Grall
@ 2013-12-11  1:47         ` Mukesh Rathor
  2013-12-11  9:23           ` Jan Beulich
  2013-12-11 14:29           ` Tim Deegan
  0 siblings, 2 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-11  1:47 UTC (permalink / raw)
  To: Julien Grall
  Cc: Xen-devel, Ian Campbell, george.dunlap, tim, keir.xen, JBeulich

On Wed, 11 Dec 2013 01:35:08 +0000
Julien Grall <julien.grall@linaro.org> wrote:

> >       unsigned long mfn = gmfn_to_mfn(d, gfn);
> >
> > -    ASSERT(t == NULL);
> > +    ASSERT(*t == INT_MAX);
> 
> There is various place where get_page_from_gfn where t == NULL. With 
> this solution it will segfault every time.
> 
> I would do something like that:
>    if (*t)
>      t = INT_MAX;

here's updated:
------------

In this patch, a new function, p2m_add_foreign(), is added
to map pages from foreign guest into current dom0 for domU creation.
Such pages are typed p2m_map_foreign. Another function
p2m_remove_foreign() is added to remove such pages. Note, in
the remove path, we must release the refcount that was taken during
the map phase. This is done in p2m_remove_page, which also addresses
releasing of refcnt when the domain is destroyed.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/mm.c         |   23 +++++++---
 xen/arch/x86/mm/p2m.c     |  101 +++++++++++++++++++++++++++++++++++++++++++++
 xen/common/memory.c       |   12 +++++-
 xen/include/asm-arm/p2m.h |    9 ++++-
 xen/include/asm-x86/p2m.h |    7 +++
 5 files changed, 143 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e3da479..6c2edc4 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2810,7 +2810,7 @@ static struct domain *get_pg_owner(domid_t domid)
         goto out;
     }
 
-    if ( unlikely(paging_mode_translate(curr)) )
+    if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
     {
         MEM_LOG("Cannot mix foreign mappings with translated domains");
         goto out;
@@ -4522,7 +4522,8 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
 
 static int xenmem_add_to_physmap_once(
     struct domain *d,
-    const struct xen_add_to_physmap *xatp)
+    const struct xen_add_to_physmap *xatp,
+    struct domain *fdom)
 {
     struct page_info *page = NULL;
     unsigned long gfn = 0; /* gcc ... */
@@ -4581,6 +4582,13 @@ static int xenmem_add_to_physmap_once(
             page = mfn_to_page(mfn);
             break;
         }
+
+        case XENMAPSPACE_gmfn_foreign:
+        {
+            rc = p2m_add_foreign(d, xatp->idx, xatp->gpfn, fdom);
+            return rc;
+        }
+
         default:
             break;
     }
@@ -4646,7 +4654,7 @@ static int xenmem_add_to_physmap(struct domain *d,
         start_xatp = *xatp;
         while ( xatp->size > 0 )
         {
-            rc = xenmem_add_to_physmap_once(d, xatp);
+            rc = xenmem_add_to_physmap_once(d, xatp, NULL);
             if ( rc < 0 )
                 return rc;
 
@@ -4672,11 +4680,12 @@ static int xenmem_add_to_physmap(struct domain *d,
         return rc;
     }
 
-    return xenmem_add_to_physmap_once(d, xatp);
+    return xenmem_add_to_physmap_once(d, xatp, NULL);
 }
 
 static int xenmem_add_to_physmap_range(struct domain *d,
-                                       struct xen_add_to_physmap_range *xatpr)
+                                       struct xen_add_to_physmap_range *xatpr,
+                                       struct domain *fdom)
 {
     /* Process entries in reverse order to allow continuations */
     while ( xatpr->size > 0 )
@@ -4693,7 +4702,7 @@ static int xenmem_add_to_physmap_range(struct domain *d,
         xatp.space = xatpr->space;
         xatp.idx = idx;
         xatp.gpfn = gpfn;
-        rc = xenmem_add_to_physmap_once(d, &xatp);
+        rc = xenmem_add_to_physmap_once(d, &xatp, fdom);
 
         if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
             return -EFAULT;
@@ -4780,7 +4789,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         }
         rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
         if ( rc == 0 )
-            rc = xenmem_add_to_physmap_range(d, &xatpr);
+            rc = xenmem_add_to_physmap_range(d, &xatpr, fd);
 
         rcu_unlock_domain(d);
         if ( fd )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 0659ef1..fe8c2f6 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -527,6 +527,10 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
             mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL);
             if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
                 set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
+
+            /* foreign pages are always refcnt'd in the add path. */
+            if ( p2m_is_foreign(t) )
+                put_page(mfn_to_page(mfn_return));
             ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
         }
     }
@@ -1741,6 +1745,103 @@ out_p2m_audit:
 #endif /* P2M_AUDIT */
 
 /*
+ * Add frames from foreign domain to target domain's physmap. Similar to
+ * XENMAPSPACE_gmfn but the frame is foreign being mapped into current,
+ * and is not removed from foreign domain.
+ * Usage: libxl on pvh dom0 creating a guest and doing privcmd_ioctl_mmap.
+ * Side Effect: the mfn for fgfn will be refcounted so it is not lost
+ *              while mapped here. The refcnt is released in p2m_remove_page
+ *              via p2m_remove_foreign.
+ * Returns: 0 ==> success
+ */
+int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
+                    unsigned long gpfn, struct domain *fdom)
+{
+    p2m_type_t p2mt, p2mt_prev;
+    mfn_t prev_mfn, mfn;
+    struct page_info *page;
+    int rc = 0;
+
+    if ( tdom == fdom || !tdom || !fdom || !is_pvh_domain(tdom) )
+        return -EINVAL;
+
+    /* following will take a refcnt on the mfn */
+    page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
+    if ( !page || !p2m_is_valid(p2mt) )
+    {
+        if ( page )
+            put_page(page);
+        return -EINVAL;
+    }
+    mfn = page_to_mfn(page);
+
+    /* Remove previously mapped page if it is present. */
+    prev_mfn = get_gfn(tdom, gpfn, &p2mt_prev);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(mfn_x(prev_mfn)) )
+            /* Xen heap frames are simply unhooked from this phys slot */
+            guest_physmap_remove_page(tdom, gpfn, mfn_x(prev_mfn), 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(tdom, gpfn);
+    }
+    /*
+     * Create the new mapping. Can't use guest_physmap_add_page() because it
+     * will update the m2p table which will result in  mfn -> gpfn of dom0
+     * and not fgfn of domU.
+     */
+    if ( set_foreign_p2m_entry(tdom, gpfn, mfn) == 0 )
+    {
+        gdprintk(XENLOG_WARNING, "set_foreign_p2m_entry failed. "
+                 "gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
+                 gpfn, mfn_x(mfn), fgfn, tdom->domain_id, fdom->domain_id);
+        put_page(page);
+        rc = -EINVAL;
+    }
+
+    /*
+     * We must do this put_gfn after set_foreign_p2m_entry so another cpu
+     * doesn't populate the gpfn before us.
+     */
+    put_gfn(tdom, gpfn);
+
+    return rc;
+}
+
+/* Note, the refcnt taken in p2m_add_foreign is released in p2m_remove_page */
+int p2m_remove_foreign(struct domain *d, unsigned long gpfn)
+{
+    mfn_t mfn;
+    p2m_type_t p2mt;
+    struct domain *foreign_dom;
+
+    ASSERT(is_pvh_domain(d));
+
+    mfn = get_gfn_query(d, gpfn, &p2mt);
+    if ( unlikely(!p2m_is_foreign(p2mt)) )
+    {
+        put_gfn(d, gpfn);
+        gdprintk(XENLOG_WARNING, "Invalid type for gpfn:%lx domid:%d t:%d\n",
+                 gpfn, d->domain_id, p2mt);
+        return -EINVAL;
+    }
+    if ( unlikely(!mfn_valid(mfn)) )
+    {
+        put_gfn(d, gpfn);
+        gdprintk(XENLOG_WARNING, "Invalid mfn for gpfn:%lx domid:%d\n",
+                 gpfn, d->domain_id);
+        return -EINVAL;
+    }
+
+    foreign_dom = page_get_owner(mfn_to_page(mfn));
+    ASSERT(d != foreign_dom);
+
+    guest_physmap_remove_page(d, gpfn, mfn_x(mfn), 0);
+    put_gfn(d, gpfn);
+    return 0;
+}
+/*
  * Local variables:
  * mode: C
  * c-file-style: "BSD"
diff --git a/xen/common/memory.c b/xen/common/memory.c
index eb7b72b..53f67c1 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -678,6 +678,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         struct xen_remove_from_physmap xrfp;
         struct page_info *page;
         struct domain *d;
+        p2m_type_t p2mt = INT_MAX;
 
         if ( copy_from_guest(&xrfp, arg, 1) )
             return -EFAULT;
@@ -693,12 +694,21 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             return rc;
         }
 
-        page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
+        /*
+         * If autotranslate guest, (eg pvh), the gfn could be mapped to a mfn
+         * from foreign domain by the user space tool during domain creation.
+         * We need to check for that, free it up from the p2m, and release
+         * refcnt on it. In such a case, page would be NULL and the following
+         * call would not have refcnt'd the page.
+         */
+        page = get_page_from_gfn(d, xrfp.gpfn, &p2mt, P2M_ALLOC);
         if ( page )
         {
             guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
             put_page(page);
         }
+        else if ( p2m_is_foreign(p2mt) )
+            rc = p2m_remove_foreign(d, xrfp.gpfn);
         else
             rc = -ENOENT;
 
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index c660820..485e907 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -83,7 +83,8 @@ static inline struct page_info *get_page_from_gfn(
     struct page_info *page;
     unsigned long mfn = gmfn_to_mfn(d, gfn);
 
-    ASSERT(t == NULL);
+    if ( t )
+        ASSERT(*t == INT_MAX);
 
     if (!mfn_valid(mfn))
         return NULL;
@@ -110,6 +111,12 @@ static inline int get_page_and_type(struct page_info *page,
     return rc;
 }
 
+#define p2m_is_foreign(_t) (0 && (_t))
+static inline int p2m_remove_foreign(struct domain *d, unsigned long gpfn)
+{
+    return -ENOSYS;
+}
+
 #endif /* _XEN_P2M_H */
 
 /*
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 6fc71a1..6371705 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -515,6 +515,13 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
 /* Set foreign mfn in the current guest's p2m table. */
 int set_foreign_p2m_entry(struct domain *domp, unsigned long gfn, mfn_t mfn);
 
+/* Add foreign mapping to the guest's p2m table. */
+int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
+                    unsigned long gpfn, struct domain *fdom);
+
+/* Remove foreign mapping from the guest's p2m table. */
+int p2m_remove_foreign(struct domain *d, unsigned long gpfn);
+
 /* 
  * Populate-on-demand
  */
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-11  1:47         ` Mukesh Rathor
@ 2013-12-11  9:23           ` Jan Beulich
  2013-12-11 14:29           ` Tim Deegan
  1 sibling, 0 replies; 46+ messages in thread
From: Jan Beulich @ 2013-12-11  9:23 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: Xen-devel, Ian Campbell, george.dunlap, Julien Grall, tim, keir.xen

>>> On 11.12.13 at 02:47, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> On Wed, 11 Dec 2013 01:35:08 +0000
> Julien Grall <julien.grall@linaro.org> wrote:
> 
>> >       unsigned long mfn = gmfn_to_mfn(d, gfn);
>> >
>> > -    ASSERT(t == NULL);
>> > +    ASSERT(*t == INT_MAX);
>> 
>> There is various place where get_page_from_gfn where t == NULL. With 
>> this solution it will segfault every time.
>> 
>> I would do something like that:
>>    if (*t)
>>      t = INT_MAX;
>...
> @@ -83,7 +83,8 @@ static inline struct page_info *get_page_from_gfn(
>      struct page_info *page;
>      unsigned long mfn = gmfn_to_mfn(d, gfn);
>  
> -    ASSERT(t == NULL);
> +    if ( t )
> +        ASSERT(*t == INT_MAX);

If you already don't follow Julien's suggestion, then please

    ASSERT(!t || *t == INT_MAX);

Jan

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-11  1:47         ` Mukesh Rathor
  2013-12-11  9:23           ` Jan Beulich
@ 2013-12-11 14:29           ` Tim Deegan
  2013-12-12  2:46             ` Mukesh Rathor
  1 sibling, 1 reply; 46+ messages in thread
From: Tim Deegan @ 2013-12-11 14:29 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: Xen-devel, Ian Campbell, george.dunlap, Julien Grall, keir.xen, JBeulich

At 17:47 -0800 on 10 Dec (1386694075), Mukesh Rathor wrote:
> On Wed, 11 Dec 2013 01:35:08 +0000
> Julien Grall <julien.grall@linaro.org> wrote:
> 
> > >       unsigned long mfn = gmfn_to_mfn(d, gfn);
> > >
> > > -    ASSERT(t == NULL);
> > > +    ASSERT(*t == INT_MAX);
> > 
> > There is various place where get_page_from_gfn where t == NULL. With 
> > this solution it will segfault every time.
> > 
> > I would do something like that:
> >    if (*t)
> >      t = INT_MAX;
> 
> here's updated:
> ------------
> 
> In this patch, a new function, p2m_add_foreign(), is added
> to map pages from foreign guest into current dom0 for domU creation.
> Such pages are typed p2m_map_foreign. Another function
> p2m_remove_foreign() is added to remove such pages. Note, in
> the remove path, we must release the refcount that was taken during
> the map phase. This is done in p2m_remove_page, which also addresses
> releasing of refcnt when the domain is destroyed.

Did you test that?  I don't think it can be true.

Maybe I wasn't clear last time: this refcount is effectively held by
the presence of a foreign mapping in a p2m entry.  AFAICT the only
properly safe way to make sure that broken guest/tools behaviour can't
mess up Xen's internal refcounting is to have the ref be taken and
dropped at the time that the entry itself is written/replaced, e.g.
ept_set_entry() (or maybe atomic_write_ept_entry()) on EPT and
paging_write_p2m_entry() on NPT/shadow.

Trying to find all the higher-level operations that might cause
foreign mappings to be inserted/removed is going to be difficult and
fragile.

You'll also need to handle domain teardown, which right now just frees
all the memory holding the p2m tables (see p2m_teardown()).  That will
need somehow to check those tables for valid foreign mappings and DTRT
about them.

Tim.

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-11 14:29           ` Tim Deegan
@ 2013-12-12  2:46             ` Mukesh Rathor
  2013-12-13  2:44               ` Mukesh Rathor
  0 siblings, 1 reply; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-12  2:46 UTC (permalink / raw)
  To: Tim Deegan
  Cc: Xen-devel, Ian Campbell, george.dunlap, Julien Grall, keir.xen, JBeulich

On Wed, 11 Dec 2013 15:29:03 +0100
Tim Deegan <tim@xen.org> wrote:

> At 17:47 -0800 on 10 Dec (1386694075), Mukesh Rathor wrote:
> > On Wed, 11 Dec 2013 01:35:08 +0000
> > Julien Grall <julien.grall@linaro.org> wrote:
> > 
> > > >       unsigned long mfn = gmfn_to_mfn(d, gfn);
> > > >
> > > > -    ASSERT(t == NULL);
> > > > +    ASSERT(*t == INT_MAX);
> > > 
> > > There is various place where get_page_from_gfn where t == NULL.
> > > With this solution it will segfault every time.
> > > 
> > > I would do something like that:
> > >    if (*t)
> > >      t = INT_MAX;
> > 
> > here's updated:
> > ------------
> > 
> > In this patch, a new function, p2m_add_foreign(), is added
> > to map pages from foreign guest into current dom0 for domU creation.
> > Such pages are typed p2m_map_foreign. Another function
> > p2m_remove_foreign() is added to remove such pages. Note, in
> > the remove path, we must release the refcount that was taken during
> > the map phase. This is done in p2m_remove_page, which also addresses
> > releasing of refcnt when the domain is destroyed.
> 
> Did you test that?  I don't think it can be true.

Yes. In this version, I had added code to p2m_remove_page() to do that.

> Maybe I wasn't clear last time: this refcount is effectively held by
> the presence of a foreign mapping in a p2m entry.  AFAICT the only
> properly safe way to make sure that broken guest/tools behaviour can't
> mess up Xen's internal refcounting is to have the ref be taken and
> dropped at the time that the entry itelf is written/replaced, e.g.
> ept_set_entry() (or maybe atomic_write_ept_entry()) on EPT and
> paging_write_p2m_entry() on NPT/shadow.

Ah, I was fixated on thinking only p2m_add_foreign was ever gonna
add p2m foreign. Hmm... a bit worried with all the p2m locking in p2m
path and me doing get_page* in ept_set_entry().... But, may be we'll be
ok. Looking at the code to refresh all the locking in my brain....

> Trying to find all the higher-level operations that might cause
> foreign mappings to be inserted/removed is going to be difficult and
> fragile.

Yeah, I found that out staring at the code.

> You'll also need to handle domain teardown, which right now just frees
> all the memory holding the p2m tables (see p2m_teardown()).  That will
> need somehow to check those tables for valid foreign mappings and DTRT
> about them.

Ok, I was thinking since this is dom0 if p2m is tearing down, nothing 
to worry about.  But, with control domains, and all that, we'd need to 
take care of the teardown path. So, I'll fix it.

I'll have another version out hopefully tomorrow, with
get_page* and put_page* in ept path, and p2m_teardown fixed up, and all
tested. I'm thinking something along the lines of:

ept_set_entry():
   ...
   if (p2mt == foreign)
   {
       page = mfn_to_page(mfn);
       fdom = page_get_owner(page);
       get_page(page, fdom);
   }
   table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
   .....


thanks a lot,
Mukesh

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-12  2:46             ` Mukesh Rathor
@ 2013-12-13  2:44               ` Mukesh Rathor
  2013-12-13 11:25                 ` Tim Deegan
  0 siblings, 1 reply; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-13  2:44 UTC (permalink / raw)
  To: Tim Deegan
  Cc: jun.nakajima, Xen-devel, Ian Campbell, george.dunlap,
	Julien Grall, eddie.dong, keir.xen, JBeulich

On Wed, 11 Dec 2013 18:46:06 -0800
Mukesh Rathor <mukesh.rathor@oracle.com> wrote:

> On Wed, 11 Dec 2013 15:29:03 +0100
> Tim Deegan <tim@xen.org> wrote:
..........
> I'll have another version out hopefully tomorrow, with
> get_page* and put_page* in ept path, and p2m_teardown fixed up, and
> all tested. I'm thinking something along the lines of:
> 
> ept_set_entry():
>    ...
>    if (p2mt == foreign)
>    {
>        page = mfn_to_page(mfn);
>        fdom = page_get_owner(page);
>        get_page(page, fdom);
>    }
>    table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
>    .....
> 
> 
> thanks a lot,
> Mukesh

Ok, this is what I came up with, please lmk. Thanks.
CCing Jun and Eddie for EPT changes.

-Mukesh

------------

In this patch, a new function, p2m_add_foreign(), is added
to map pages from foreign guest into current dom0 for domU creation.
Such pages are typed p2m_map_foreign. Another function
p2m_remove_foreign() is added to remove such pages. Note, it is the
nature of such pages that a refcnt is held during their stay in the p2m.
The refcnt is added and released in the low level ept code for convenience.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/mm.c         |   23 +++++++---
 xen/arch/x86/mm/p2m-ept.c |   30 +++++++++++---
 xen/arch/x86/mm/p2m.c     |   98 +++++++++++++++++++++++++++++++++++++++++++++
 xen/common/memory.c       |   12 +++++-
 xen/include/asm-arm/p2m.h |    8 +++-
 xen/include/asm-x86/p2m.h |    7 +++
 6 files changed, 163 insertions(+), 15 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e3da479..6c2edc4 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2810,7 +2810,7 @@ static struct domain *get_pg_owner(domid_t domid)
         goto out;
     }
 
-    if ( unlikely(paging_mode_translate(curr)) )
+    if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
     {
         MEM_LOG("Cannot mix foreign mappings with translated domains");
         goto out;
@@ -4522,7 +4522,8 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
 
 static int xenmem_add_to_physmap_once(
     struct domain *d,
-    const struct xen_add_to_physmap *xatp)
+    const struct xen_add_to_physmap *xatp,
+    struct domain *fdom)
 {
     struct page_info *page = NULL;
     unsigned long gfn = 0; /* gcc ... */
@@ -4581,6 +4582,13 @@ static int xenmem_add_to_physmap_once(
             page = mfn_to_page(mfn);
             break;
         }
+
+        case XENMAPSPACE_gmfn_foreign:
+        {
+            rc = p2m_add_foreign(d, xatp->idx, xatp->gpfn, fdom);
+            return rc;
+        }
+
         default:
             break;
     }
@@ -4646,7 +4654,7 @@ static int xenmem_add_to_physmap(struct domain *d,
         start_xatp = *xatp;
         while ( xatp->size > 0 )
         {
-            rc = xenmem_add_to_physmap_once(d, xatp);
+            rc = xenmem_add_to_physmap_once(d, xatp, NULL);
             if ( rc < 0 )
                 return rc;
 
@@ -4672,11 +4680,12 @@ static int xenmem_add_to_physmap(struct domain *d,
         return rc;
     }
 
-    return xenmem_add_to_physmap_once(d, xatp);
+    return xenmem_add_to_physmap_once(d, xatp, NULL);
 }
 
 static int xenmem_add_to_physmap_range(struct domain *d,
-                                       struct xen_add_to_physmap_range *xatpr)
+                                       struct xen_add_to_physmap_range *xatpr,
+                                       struct domain *fdom)
 {
     /* Process entries in reverse order to allow continuations */
     while ( xatpr->size > 0 )
@@ -4693,7 +4702,7 @@ static int xenmem_add_to_physmap_range(struct domain *d,
         xatp.space = xatpr->space;
         xatp.idx = idx;
         xatp.gpfn = gpfn;
-        rc = xenmem_add_to_physmap_once(d, &xatp);
+        rc = xenmem_add_to_physmap_once(d, &xatp, fdom);
 
         if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
             return -EFAULT;
@@ -4780,7 +4789,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         }
         rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
         if ( rc == 0 )
-            rc = xenmem_add_to_physmap_range(d, &xatpr);
+            rc = xenmem_add_to_physmap_range(d, &xatpr, fd);
 
         rcu_unlock_domain(d);
         if ( fd )
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 08d1d72..3b9f764 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -36,8 +36,6 @@
 
 #define atomic_read_ept_entry(__pepte)                              \
     ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
-#define atomic_write_ept_entry(__pepte, __epte)                     \
-    write_atomic(&(__pepte)->epte, (__epte).epte)
 
 #define is_epte_present(ept_entry)      ((ept_entry)->epte & 0x7)
 #define is_epte_superpage(ept_entry)    ((ept_entry)->sp)
@@ -46,6 +44,26 @@ static inline bool_t is_epte_valid(ept_entry_t *e)
     return (e->epte != 0 && e->sa_p2mt != p2m_invalid);
 }
 
+static inline void write_ept_entry(ept_entry_t *entryptr, ept_entry_t *new)
+{
+    if ( p2m_is_foreign(entryptr->sa_p2mt) )
+        put_page(mfn_to_page(entryptr->mfn));
+
+    if ( p2m_is_foreign(new->sa_p2mt) )
+    {
+        struct page_info *page;
+        struct domain *fdom;
+
+        ASSERT(mfn_valid(new->mfn));
+        ASSERT(new->mfn != entryptr->mfn);
+        page = mfn_to_page(new->mfn);
+        fdom = page_get_owner(page);
+        get_page(page, fdom);
+    }
+    write_atomic(&entryptr->epte, new->epte);
+}
+
+
 static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type, p2m_access_t access)
 {
     /* First apply type permissions */
@@ -378,7 +396,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
             ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
         }
 
-        atomic_write_ept_entry(ept_entry, new_entry);
+        write_ept_entry(ept_entry, &new_entry);
     }
     else
     {
@@ -398,7 +416,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
 
         /* now install the newly split ept sub-tree */
         /* NB: please make sure domian is paused and no in-fly VT-d DMA. */
-        atomic_write_ept_entry(ept_entry, split_ept_entry);
+        write_ept_entry(ept_entry, &split_ept_entry);
 
         /* then move to the level we want to make real changes */
         for ( ; i > target; i-- )
@@ -428,7 +446,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
 
         ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
 
-        atomic_write_ept_entry(ept_entry, new_entry);
+        write_ept_entry(ept_entry, &new_entry);
     }
 
     /* Track the highest gfn for which we have ever had a valid mapping */
@@ -665,7 +683,7 @@ static void ept_change_entry_type_page(mfn_t ept_page_mfn, int ept_page_level,
 
             e.sa_p2mt = nt;
             ept_p2m_type_to_flags(&e, nt, e.access);
-            atomic_write_ept_entry(&epte[i], e);
+            write_ept_entry(&epte[i], &e);
         }
     }
 
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 0659ef1..29f7b23 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -479,6 +479,8 @@ void p2m_teardown(struct p2m_domain *p2m)
             /* Does not fail with ENOMEM given the DESTROY flag */
             BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
         }
+        if ( p2m_is_foreign(t) )
+            put_page(mfn_to_page(mfn));
     }
 
     p2m->phys_table = pagetable_null();
@@ -1741,6 +1743,102 @@ out_p2m_audit:
 #endif /* P2M_AUDIT */
 
 /*
+ * Add frames from foreign domain to target domain's physmap. Similar to
+ * XENMAPSPACE_gmfn but the frame is foreign being mapped into current,
+ * and is not removed from foreign domain.
+ * Usage: libxl on pvh dom0 creating a guest and doing privcmd_ioctl_mmap.
+ * Side Effect: the mfn for fgfn will be refcounted in lower level routines
+ *              so it is not lost while mapped here. The refcnt is released 
+ *              via the p2m_remove_foreign path.
+ * Returns: 0 ==> success
+ */
+int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
+                    unsigned long gpfn, struct domain *fdom)
+{
+    p2m_type_t p2mt, p2mt_prev;
+    mfn_t prev_mfn, mfn;
+    struct page_info *page;
+    int rc = 0;
+
+    if ( tdom == fdom || !tdom || !fdom || !is_pvh_domain(tdom) )
+        return -EINVAL;
+
+    /* following will take a refcnt on the mfn */
+    page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
+    if ( !page || !p2m_is_valid(p2mt) )
+    {
+        if ( page )
+            put_page(page);
+        return -EINVAL;
+    }
+    mfn = page_to_mfn(page);
+
+    /* Remove previously mapped page if it is present. */
+    prev_mfn = get_gfn(tdom, gpfn, &p2mt_prev);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(mfn_x(prev_mfn)) )
+            /* Xen heap frames are simply unhooked from this phys slot */
+            guest_physmap_remove_page(tdom, gpfn, mfn_x(prev_mfn), 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(tdom, gpfn);
+    }
+    /*
+     * Create the new mapping. Can't use guest_physmap_add_page() because it
+     * will update the m2p table which will result in  mfn -> gpfn of dom0
+     * and not fgfn of domU.
+     */
+    if ( set_foreign_p2m_entry(tdom, gpfn, mfn) == 0 )
+    {
+        gdprintk(XENLOG_WARNING, "set_foreign_p2m_entry failed. "
+                 "gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
+                 gpfn, mfn_x(mfn), fgfn, tdom->domain_id, fdom->domain_id);
+        rc = -EINVAL;
+    }
+    put_page(page);
+
+    /*
+     * We must do this put_gfn after set_foreign_p2m_entry so another cpu
+     * doesn't populate the gpfn before us.
+     */
+    put_gfn(tdom, gpfn);
+
+    return rc;
+}
+
+int p2m_remove_foreign(struct domain *d, unsigned long gpfn)
+{
+    mfn_t mfn;
+    p2m_type_t p2mt;
+    struct domain *foreign_dom;
+
+    ASSERT(is_pvh_domain(d));
+
+    mfn = get_gfn_query(d, gpfn, &p2mt);
+    if ( unlikely(!p2m_is_foreign(p2mt)) )
+    {
+        put_gfn(d, gpfn);
+        gdprintk(XENLOG_WARNING, "Invalid type for gpfn:%lx domid:%d t:%d\n",
+                 gpfn, d->domain_id, p2mt);
+        return -EINVAL;
+    }
+    if ( unlikely(!mfn_valid(mfn)) )
+    {
+        put_gfn(d, gpfn);
+        gdprintk(XENLOG_WARNING, "Invalid mfn for gpfn:%lx domid:%d\n",
+                 gpfn, d->domain_id);
+        return -EINVAL;
+    }
+
+    foreign_dom = page_get_owner(mfn_to_page(mfn));
+    ASSERT(d != foreign_dom);
+
+    guest_physmap_remove_page(d, gpfn, mfn_x(mfn), 0);
+    put_gfn(d, gpfn);
+    return 0;
+}
+/*
  * Local variables:
  * mode: C
  * c-file-style: "BSD"
diff --git a/xen/common/memory.c b/xen/common/memory.c
index eb7b72b..53f67c1 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -678,6 +678,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         struct xen_remove_from_physmap xrfp;
         struct page_info *page;
         struct domain *d;
+        p2m_type_t p2mt = INT_MAX;
 
         if ( copy_from_guest(&xrfp, arg, 1) )
             return -EFAULT;
@@ -693,12 +694,21 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             return rc;
         }
 
-        page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
+        /*
+         * If autotranslate guest, (eg pvh), the gfn could be mapped to a mfn
+         * from foreign domain by the user space tool during domain creation.
+         * We need to check for that, free it up from the p2m, and release
+         * refcnt on it. In such a case, page would be NULL and the following
+         * call would not have refcnt'd the page.
+         */
+        page = get_page_from_gfn(d, xrfp.gpfn, &p2mt, P2M_ALLOC);
         if ( page )
         {
             guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
             put_page(page);
         }
+        else if ( p2m_is_foreign(p2mt) )
+            rc = p2m_remove_foreign(d, xrfp.gpfn);
         else
             rc = -ENOENT;
 
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index c660820..a664950 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -83,7 +83,7 @@ static inline struct page_info *get_page_from_gfn(
     struct page_info *page;
     unsigned long mfn = gmfn_to_mfn(d, gfn);
 
-    ASSERT(t == NULL);
+    ASSERT(!t || *t == INT_MAX);
 
     if (!mfn_valid(mfn))
         return NULL;
@@ -110,6 +110,12 @@ static inline int get_page_and_type(struct page_info *page,
     return rc;
 }
 
+#define p2m_is_foreign(_t) (0 && (_t))
+static inline int p2m_remove_foreign(struct domain *d, unsigned long gpfn)
+{
+    return -ENOSYS;
+}
+
 #endif /* _XEN_P2M_H */
 
 /*
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 6fc71a1..6371705 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -515,6 +515,13 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
 /* Set foreign mfn in the current guest's p2m table. */
 int set_foreign_p2m_entry(struct domain *domp, unsigned long gfn, mfn_t mfn);
 
+/* Add foreign mapping to the guest's p2m table. */
+int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
+                    unsigned long gpfn, struct domain *fdom);
+
+/* Remove foreign mapping from the guest's p2m table. */
+int p2m_remove_foreign(struct domain *d, unsigned long gpfn);
+
 /* 
  * Populate-on-demand
  */
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-13  2:44               ` Mukesh Rathor
@ 2013-12-13 11:25                 ` Tim Deegan
  2013-12-13 11:39                   ` Jan Beulich
  2013-12-14  2:48                   ` Mukesh Rathor
  0 siblings, 2 replies; 46+ messages in thread
From: Tim Deegan @ 2013-12-13 11:25 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: jun.nakajima, Xen-devel, Ian Campbell, george.dunlap,
	Julien Grall, eddie.dong, keir.xen, JBeulich

Hi,

At 18:44 -0800 on 12 Dec (1386870289), Mukesh Rathor wrote:
> In this patch, a new function, p2m_add_foreign(), is added
> to map pages from foreign guest into current dom0 for domU creation.
> Such pages are typed p2m_map_foreign. Another function
> p2m_remove_foreign() is added to remove such pages. Note, it is the
> nature of such pages that a refcnt is held during their stay in the p2m.
> The refcnt is added and released in the low level ept code for convenience.

This looks much better, thank you.  I'm afraid it's still not ready to
be committed though:

> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
> index 08d1d72..3b9f764 100644
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -36,8 +36,6 @@
>  
>  #define atomic_read_ept_entry(__pepte)                              \
>      ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
> -#define atomic_write_ept_entry(__pepte, __epte)                     \
> -    write_atomic(&(__pepte)->epte, (__epte).epte)
>  
>  #define is_epte_present(ept_entry)      ((ept_entry)->epte & 0x7)
>  #define is_epte_superpage(ept_entry)    ((ept_entry)->sp)
> @@ -46,6 +44,26 @@ static inline bool_t is_epte_valid(ept_entry_t *e)
>      return (e->epte != 0 && e->sa_p2mt != p2m_invalid);
>  }
>  
> +static inline void write_ept_entry(ept_entry_t *entryptr, ept_entry_t *new)
> +{
> +    if ( p2m_is_foreign(entryptr->sa_p2mt) )
> +        put_page(mfn_to_page(entryptr->mfn));

The usual idiom for such things is to take the new ref first, then
drop the old one.  That way you don't have to worry about temporarily
dropping a ref to zero (yes, I know the caller ought to have a ref too
but it's better to be safe than sorry)...

> +    if ( p2m_is_foreign(new->sa_p2mt) )
> +    {
> +        struct page_info *page;
> +        struct domain *fdom;
> +
> +        ASSERT(mfn_valid(new->mfn));
> +        ASSERT(new->mfn != entryptr->mfn);

...and you won't need this assertion.

> +        page = mfn_to_page(new->mfn);
> +        fdom = page_get_owner(page);
> +        get_page(page, fdom);
> +    }
> +    write_atomic(&entryptr->epte, new->epte);
> +}

Also, the non-EPT p2m code needs to have _something_ for this, even if
it's just an error return on all p2m_foreign mappings (and a warning
to console, and some more PVH fixme comments...)

> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 0659ef1..29f7b23 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -479,6 +479,8 @@ void p2m_teardown(struct p2m_domain *p2m)
>              /* Does not fail with ENOMEM given the DESTROY flag */
>              BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
>          }
> +        if ( p2m_is_foreign(t) )
> +            put_page(mfn_to_page(mfn));

That's not going to work. Just above here there's an early exit if the
domain has no shared pages.

This loop should go away anyway, since as it says in the comment above
it's a noop after the earlier teardown (and if there is work to do, a
foreach(0..max) loop is definitely the wrong thing to do).  So I'm
afraid you can't hook your teardown operation here.  You'll have to
walk the actual p2m pages.

Also, Jan may have an opinion about whether a teardown operation that
has to walk each p2m entry would have to be made preemptible.  I'm not
sure where we draw the line on such things.

Tim.

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-13 11:25                 ` Tim Deegan
@ 2013-12-13 11:39                   ` Jan Beulich
  2013-12-13 19:02                     ` George Dunlap
  2013-12-14  2:48                   ` Mukesh Rathor
  1 sibling, 1 reply; 46+ messages in thread
From: Jan Beulich @ 2013-12-13 11:39 UTC (permalink / raw)
  To: Mukesh Rathor, Tim Deegan
  Cc: Xen-devel, Ian Campbell, george.dunlap, Julien Grall, eddie.dong,
	keir.xen, jun.nakajima

>>> On 13.12.13 at 12:25, Tim Deegan <tim@xen.org> wrote:
> Also, Jan may have an opinion about whether a teardown operation that
> has to walk each p2m entry would have to be made preemptible.  I'm not
> sure where we draw the line on such things.

There's no line here - "each" is too much in any case.

Jan

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-13 11:39                   ` Jan Beulich
@ 2013-12-13 19:02                     ` George Dunlap
  2013-12-16  7:47                       ` Jan Beulich
  0 siblings, 1 reply; 46+ messages in thread
From: George Dunlap @ 2013-12-13 19:02 UTC (permalink / raw)
  To: Jan Beulich, Mukesh Rathor, Tim Deegan
  Cc: Xen-devel, Ian Campbell, Julien Grall, eddie.dong, keir.xen,
	jun.nakajima

On 12/13/2013 11:39 AM, Jan Beulich wrote:
>>>> On 13.12.13 at 12:25, Tim Deegan <tim@xen.org> wrote:
>> Also, Jan may have an opinion about whether a teardown operation that
>> has to walk each p2m entry would have to be made preemptible.  I'm not
>> sure where we draw the line on such things.
> There's no line here - "each" is too much in any case.

By which you mean, yes a teardown operation that has to walk each p2m 
entry does need to be made pre-emptible?

It that's not what you mean, I'm at a loss for an interpretation of the 
above sentence. :-)

  -George

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-13 11:25                 ` Tim Deegan
  2013-12-13 11:39                   ` Jan Beulich
@ 2013-12-14  2:48                   ` Mukesh Rathor
  2013-12-16  8:40                     ` Jan Beulich
  1 sibling, 1 reply; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-14  2:48 UTC (permalink / raw)
  To: Tim Deegan
  Cc: jun.nakajima, Xen-devel, Ian Campbell, george.dunlap,
	Julien Grall, eddie.dong, keir.xen, JBeulich

On Fri, 13 Dec 2013 12:25:48 +0100
Tim Deegan <tim@xen.org> wrote:

> Hi,
> 
.....
> >  }
> >  
> > +static inline void write_ept_entry(ept_entry_t *entryptr,
> > ept_entry_t *new) +{
> > +    if ( p2m_is_foreign(entryptr->sa_p2mt) )
> > +        put_page(mfn_to_page(entryptr->mfn));
> 
> The usual idiom for such things is to take the new ref first, then
> drop the old one.  That way you don't have to worry about temporarily
> dropping a ref to zero (yes, I know the caller ought to have a ref too
> but it's better to be safe than sorry)...
> 
> > +    if ( p2m_is_foreign(new->sa_p2mt) )
> > +    {
> > +        struct page_info *page;
> > +        struct domain *fdom;
> > +
> > +        ASSERT(mfn_valid(new->mfn));
> > +        ASSERT(new->mfn != entryptr->mfn);
> 
> ...and you won't need this assertion.

makes sense. Done.

> > +        page = mfn_to_page(new->mfn);
> > +        fdom = page_get_owner(page);
> > +        get_page(page, fdom);
> > +    }
> > +    write_atomic(&entryptr->epte, new->epte);
> > +}
> 
> Also, the non-EPT p2m code needs to have _something_ for this, even if
> it's just an error return on all p2m_foreign mappings (and a warning
> to console, and some more PVH fixme comments...)

Yup, added check in p2m_set_entry().

> > diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> > index 0659ef1..29f7b23 100644
> > --- a/xen/arch/x86/mm/p2m.c
> > +++ b/xen/arch/x86/mm/p2m.c
> > @@ -479,6 +479,8 @@ void p2m_teardown(struct p2m_domain *p2m)
> >              /* Does not fail with ENOMEM given the DESTROY flag */
> >              BUG_ON(mem_sharing_unshare_page(d, gfn,
> > MEM_SHARING_DESTROY_GFN)); }
> > +        if ( p2m_is_foreign(t) )
> > +            put_page(mfn_to_page(mfn));
> 
> That's not going to work. Just above here there's an early exit if the
> domain has no shared pages.

Ooopss... I should have noticed that, sorry I wasted your time. I 
rushed thru it. (The function p2m_teardown is a misnomer, IMO it should
be p2m_teardown_shared, so one doesn't take it for granted. anyways.)

> This loop should go away anyway, since as it says in the comment above
> it's a noop after the earlier teardown (and if there is work to do, a
> foreach(0..max) loop is definitely the wrong thing to do).  So I'm
> afraid you can't hook your teardown operation here.  You'll have to
> walk the actual p2m pages.
> 
> Also, Jan may have an opinion about whether a teardown operation that
> has to walk each p2m entry would have to be made preemptible.  I'm not
> sure where we draw the line on such things.

Since at present teardown cleanup of foreign is not really that important
as it's only applicable to dom0, let me submit another patch for it on 
Mon with a few ideas. That would also keep this patch size reasonable,
and keep you from having to look at the same code over and over. 

So, please take a look at the version below with above two fixes. If
you approve it, i can resubmit the entire series rebased to latest 
with your ack on Monday, and the series can go in while we resolve
the p2m teardown.

thanks for your time,
Mukesh

-----------------------------

In this patch, a new function, p2m_add_foreign(), is added
to map pages from foreign guest into current dom0 for domU creation.
Such pages are typed p2m_map_foreign. Another function
p2m_remove_foreign() is added to remove such pages. Note, it is the
nature of such pages that a refcnt is held during their stay in the p2m.
The refcnt is added and released in the low level ept code for convenience.
The cleanup of foreign pages from p2m upon its destruction is done 
under a separate patch.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/mm.c         |   23 +++++++---
 xen/arch/x86/mm/p2m-ept.c |   29 +++++++++++---
 xen/arch/x86/mm/p2m-pt.c  |    9 ++++-
 xen/arch/x86/mm/p2m.c     |   96 +++++++++++++++++++++++++++++++++++++++++++++
 xen/common/memory.c       |   12 +++++-
 xen/include/asm-arm/p2m.h |    8 +++-
 xen/include/asm-x86/p2m.h |    7 +++
 7 files changed, 168 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e3da479..6c2edc4 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2810,7 +2810,7 @@ static struct domain *get_pg_owner(domid_t domid)
         goto out;
     }
 
-    if ( unlikely(paging_mode_translate(curr)) )
+    if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
     {
         MEM_LOG("Cannot mix foreign mappings with translated domains");
         goto out;
@@ -4522,7 +4522,8 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
 
 static int xenmem_add_to_physmap_once(
     struct domain *d,
-    const struct xen_add_to_physmap *xatp)
+    const struct xen_add_to_physmap *xatp,
+    struct domain *fdom)
 {
     struct page_info *page = NULL;
     unsigned long gfn = 0; /* gcc ... */
@@ -4581,6 +4582,13 @@ static int xenmem_add_to_physmap_once(
             page = mfn_to_page(mfn);
             break;
         }
+
+        case XENMAPSPACE_gmfn_foreign:
+        {
+            rc = p2m_add_foreign(d, xatp->idx, xatp->gpfn, fdom);
+            return rc;
+        }
+
         default:
             break;
     }
@@ -4646,7 +4654,7 @@ static int xenmem_add_to_physmap(struct domain *d,
         start_xatp = *xatp;
         while ( xatp->size > 0 )
         {
-            rc = xenmem_add_to_physmap_once(d, xatp);
+            rc = xenmem_add_to_physmap_once(d, xatp, NULL);
             if ( rc < 0 )
                 return rc;
 
@@ -4672,11 +4680,12 @@ static int xenmem_add_to_physmap(struct domain *d,
         return rc;
     }
 
-    return xenmem_add_to_physmap_once(d, xatp);
+    return xenmem_add_to_physmap_once(d, xatp, NULL);
 }
 
 static int xenmem_add_to_physmap_range(struct domain *d,
-                                       struct xen_add_to_physmap_range *xatpr)
+                                       struct xen_add_to_physmap_range *xatpr,
+                                       struct domain *fdom)
 {
     /* Process entries in reverse order to allow continuations */
     while ( xatpr->size > 0 )
@@ -4693,7 +4702,7 @@ static int xenmem_add_to_physmap_range(struct domain *d,
         xatp.space = xatpr->space;
         xatp.idx = idx;
         xatp.gpfn = gpfn;
-        rc = xenmem_add_to_physmap_once(d, &xatp);
+        rc = xenmem_add_to_physmap_once(d, &xatp, fdom);
 
         if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
             return -EFAULT;
@@ -4780,7 +4789,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         }
         rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
         if ( rc == 0 )
-            rc = xenmem_add_to_physmap_range(d, &xatpr);
+            rc = xenmem_add_to_physmap_range(d, &xatpr, fd);
 
         rcu_unlock_domain(d);
         if ( fd )
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 08d1d72..6fb5b86 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -36,8 +36,6 @@
 
 #define atomic_read_ept_entry(__pepte)                              \
     ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
-#define atomic_write_ept_entry(__pepte, __epte)                     \
-    write_atomic(&(__pepte)->epte, (__epte).epte)
 
 #define is_epte_present(ept_entry)      ((ept_entry)->epte & 0x7)
 #define is_epte_superpage(ept_entry)    ((ept_entry)->sp)
@@ -46,6 +44,25 @@ static inline bool_t is_epte_valid(ept_entry_t *e)
     return (e->epte != 0 && e->sa_p2mt != p2m_invalid);
 }
 
+static inline void write_ept_entry(ept_entry_t *entryptr, ept_entry_t *new)
+{
+    if ( p2m_is_foreign(new->sa_p2mt) )
+    {
+        struct page_info *page;
+        struct domain *fdom;
+
+        ASSERT(mfn_valid(new->mfn));
+        page = mfn_to_page(new->mfn);
+        fdom = page_get_owner(page);
+        get_page(page, fdom);
+    }
+    if ( p2m_is_foreign(entryptr->sa_p2mt) )
+        put_page(mfn_to_page(entryptr->mfn));
+
+    write_atomic(&entryptr->epte, new->epte);
+}
+
+
 static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type, p2m_access_t access)
 {
     /* First apply type permissions */
@@ -378,7 +395,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
             ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
         }
 
-        atomic_write_ept_entry(ept_entry, new_entry);
+        write_ept_entry(ept_entry, &new_entry);
     }
     else
     {
@@ -398,7 +415,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
 
         /* now install the newly split ept sub-tree */
         /* NB: please make sure domian is paused and no in-fly VT-d DMA. */
-        atomic_write_ept_entry(ept_entry, split_ept_entry);
+        write_ept_entry(ept_entry, &split_ept_entry);
 
         /* then move to the level we want to make real changes */
         for ( ; i > target; i-- )
@@ -428,7 +445,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
 
         ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
 
-        atomic_write_ept_entry(ept_entry, new_entry);
+        write_ept_entry(ept_entry, &new_entry);
     }
 
     /* Track the highest gfn for which we have ever had a valid mapping */
@@ -665,7 +682,7 @@ static void ept_change_entry_type_page(mfn_t ept_page_mfn, int ept_page_level,
 
             e.sa_p2mt = nt;
             ept_p2m_type_to_flags(&e, nt, e.access);
-            atomic_write_ept_entry(&epte[i], e);
+            write_ept_entry(&epte[i], &e);
         }
     }
 
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 09b60ce..766fd67 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -276,7 +276,7 @@ p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table,
     return 1;
 }
 
-// Returns 0 on error (out of memory)
+// Returns 0 on error (out of memory, or unimplemented p2m type)
 static int
 p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
               unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
@@ -312,6 +312,13 @@ p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
     }
 
+    if ( p2m_is_foreign(p2mt) )
+    {
+        /* pvh fixme: foreign types are only supported on ept at present */
+        gdprintk(XENLOG_WARNING, "Unimplemented foreign p2m type.\n");
+        return 0;
+    }
+
     if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
                          L4_PAGETABLE_SHIFT - PAGE_SHIFT,
                          L4_PAGETABLE_ENTRIES, PGT_l3_page_table) )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 0659ef1..441d151 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1741,6 +1741,102 @@ out_p2m_audit:
 #endif /* P2M_AUDIT */
 
 /*
+ * Add frames from foreign domain to target domain's physmap. Similar to
+ * XENMAPSPACE_gmfn but the frame is foreign being mapped into current,
+ * and is not removed from foreign domain.
+ * Usage: libxl on pvh dom0 creating a guest and doing privcmd_ioctl_mmap.
+ * Side Effect: the mfn for fgfn will be refcounted in lower level routines
+ *              so it is not lost while mapped here. The refcnt is released
+ *              via the p2m_remove_foreign path.
+ * Returns: 0 ==> success
+ */
+int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
+                    unsigned long gpfn, struct domain *fdom)
+{
+    p2m_type_t p2mt, p2mt_prev;
+    mfn_t prev_mfn, mfn;
+    struct page_info *page;
+    int rc = 0;
+
+    if ( tdom == fdom || !tdom || !fdom || !is_pvh_domain(tdom) )
+        return -EINVAL;
+
+    /* following will take a refcnt on the mfn */
+    page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
+    if ( !page || !p2m_is_valid(p2mt) )
+    {
+        if ( page )
+            put_page(page);
+        return -EINVAL;
+    }
+    mfn = page_to_mfn(page);
+
+    /* Remove previously mapped page if it is present. */
+    prev_mfn = get_gfn(tdom, gpfn, &p2mt_prev);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(mfn_x(prev_mfn)) )
+            /* Xen heap frames are simply unhooked from this phys slot */
+            guest_physmap_remove_page(tdom, gpfn, mfn_x(prev_mfn), 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(tdom, gpfn);
+    }
+    /*
+     * Create the new mapping. Can't use guest_physmap_add_page() because it
+     * will update the m2p table which will result in  mfn -> gpfn of dom0
+     * and not fgfn of domU.
+     */
+    if ( set_foreign_p2m_entry(tdom, gpfn, mfn) == 0 )
+    {
+        gdprintk(XENLOG_WARNING, "set_foreign_p2m_entry failed. "
+                 "gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
+                 gpfn, mfn_x(mfn), fgfn, tdom->domain_id, fdom->domain_id);
+        rc = -EINVAL;
+    }
+    put_page(page);
+
+    /*
+     * We must do this put_gfn after set_foreign_p2m_entry so another cpu
+     * doesn't populate the gpfn before us.
+     */
+    put_gfn(tdom, gpfn);
+
+    return rc;
+}
+
+int p2m_remove_foreign(struct domain *d, unsigned long gpfn)
+{
+    mfn_t mfn;
+    p2m_type_t p2mt;
+    struct domain *foreign_dom;
+
+    ASSERT(is_pvh_domain(d));
+
+    mfn = get_gfn_query(d, gpfn, &p2mt);
+    if ( unlikely(!p2m_is_foreign(p2mt)) )
+    {
+        put_gfn(d, gpfn);
+        gdprintk(XENLOG_WARNING, "Invalid type for gpfn:%lx domid:%d t:%d\n",
+                 gpfn, d->domain_id, p2mt);
+        return -EINVAL;
+    }
+    if ( unlikely(!mfn_valid(mfn)) )
+    {
+        put_gfn(d, gpfn);
+        gdprintk(XENLOG_WARNING, "Invalid mfn for gpfn:%lx domid:%d\n",
+                 gpfn, d->domain_id);
+        return -EINVAL;
+    }
+
+    foreign_dom = page_get_owner(mfn_to_page(mfn));
+    ASSERT(d != foreign_dom);
+
+    guest_physmap_remove_page(d, gpfn, mfn_x(mfn), 0);
+    put_gfn(d, gpfn);
+    return 0;
+}
+/*
  * Local variables:
  * mode: C
  * c-file-style: "BSD"
diff --git a/xen/common/memory.c b/xen/common/memory.c
index eb7b72b..53f67c1 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -678,6 +678,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         struct xen_remove_from_physmap xrfp;
         struct page_info *page;
         struct domain *d;
+        p2m_type_t p2mt = INT_MAX;
 
         if ( copy_from_guest(&xrfp, arg, 1) )
             return -EFAULT;
@@ -693,12 +694,21 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             return rc;
         }
 
-        page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
+        /*
+         * If autotranslate guest, (eg pvh), the gfn could be mapped to a mfn
+         * from foreign domain by the user space tool during domain creation.
+         * We need to check for that, free it up from the p2m, and release
+         * refcnt on it. In such a case, page would be NULL and the following
+         * call would not have refcnt'd the page.
+         */
+        page = get_page_from_gfn(d, xrfp.gpfn, &p2mt, P2M_ALLOC);
         if ( page )
         {
             guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
             put_page(page);
         }
+        else if ( p2m_is_foreign(p2mt) )
+            rc = p2m_remove_foreign(d, xrfp.gpfn);
         else
             rc = -ENOENT;
 
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index c660820..a664950 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -83,7 +83,7 @@ static inline struct page_info *get_page_from_gfn(
     struct page_info *page;
     unsigned long mfn = gmfn_to_mfn(d, gfn);
 
-    ASSERT(t == NULL);
+    ASSERT(!t || *t == INT_MAX);
 
     if (!mfn_valid(mfn))
         return NULL;
@@ -110,6 +110,12 @@ static inline int get_page_and_type(struct page_info *page,
     return rc;
 }
 
+#define p2m_is_foreign(_t) (0 && (_t))
+static inline int p2m_remove_foreign(struct domain *d, unsigned long gpfn)
+{
+    return -ENOSYS;
+}
+
 #endif /* _XEN_P2M_H */
 
 /*
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 6fc71a1..6371705 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -515,6 +515,13 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
 /* Set foreign mfn in the current guest's p2m table. */
 int set_foreign_p2m_entry(struct domain *domp, unsigned long gfn, mfn_t mfn);
 
+/* Add foreign mapping to the guest's p2m table. */
+int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
+                    unsigned long gpfn, struct domain *fdom);
+
+/* Remove foreign mapping from the guest's p2m table. */
+int p2m_remove_foreign(struct domain *d, unsigned long gpfn);
+
 /* 
  * Populate-on-demand
  */
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-13 19:02                     ` George Dunlap
@ 2013-12-16  7:47                       ` Jan Beulich
  0 siblings, 0 replies; 46+ messages in thread
From: Jan Beulich @ 2013-12-16  7:47 UTC (permalink / raw)
  To: George Dunlap, Mukesh Rathor, Tim Deegan
  Cc: Xen-devel, Ian Campbell, Julien Grall, eddie.dong, keir.xen,
	jun.nakajima

>>> On 13.12.13 at 20:02, George Dunlap <george.dunlap@eu.citrix.com> wrote:
> On 12/13/2013 11:39 AM, Jan Beulich wrote:
>>>>> On 13.12.13 at 12:25, Tim Deegan <tim@xen.org> wrote:
>>> Also, Jan may have an opinion about whether a teardown operation that
>>> has to walk each p2m entry would have to be made preemptible.  I'm not
>>> sure where we draw the line on such things.
>> There's no line here - "each" is too much in any case.
> 
> By which you mean, yes a teardown operation that has to walk each p2m 
> entry does need to be made pre-emptible?
> 
> It that's not what you mean, I'm at a loss for an interpretation of the 
> above sentence. :-)

Any operation - teardown or other - that needs to walk all of a
domain's pages has to be preemptible. So yes, your interpretation
of my earlier response was right.

Jan

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-14  2:48                   ` Mukesh Rathor
@ 2013-12-16  8:40                     ` Jan Beulich
  2013-12-16 23:27                       ` Mukesh Rathor
  0 siblings, 1 reply; 46+ messages in thread
From: Jan Beulich @ 2013-12-16  8:40 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: Xen-devel, Ian Campbell, george.dunlap, Tim Deegan, Julien Grall,
	eddie.dong, keir.xen, jun.nakajima

>>> On 14.12.13 at 03:48, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
>> Also, Jan may have an opinion about whether a teardown operation that
>> has to walk each p2m entry would have to be made preemptible.  I'm not
>> sure where we draw the line on such things.
> 
> Since at present teardown cleanup of foreign is not really that important
> as its only applicable to dom0, let me submit another patch for it on 
> Mon with few ideas. That would also keep this patch size reasonable,
> and keep you from having to look at the same code over and over. 
> 
> So, please take a look at the version below with above two fixes. If
> you approve it, i can resubmit the entire series rebased to latest 
> with your ack on Monday, and the series can go in while we resolve
> the p2m teardown.

Going through the patch again, I'm not seeing any loop being
added. Am I missing something here?

> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -36,8 +36,6 @@
>  
>  #define atomic_read_ept_entry(__pepte)                              \
>      ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
> -#define atomic_write_ept_entry(__pepte, __epte)                     \
> -    write_atomic(&(__pepte)->epte, (__epte).epte)
>  
>  #define is_epte_present(ept_entry)      ((ept_entry)->epte & 0x7)
>  #define is_epte_superpage(ept_entry)    ((ept_entry)->sp)
> @@ -46,6 +44,25 @@ static inline bool_t is_epte_valid(ept_entry_t *e)
>      return (e->epte != 0 && e->sa_p2mt != p2m_invalid);
>  }
>  
> +static inline void write_ept_entry(ept_entry_t *entryptr, ept_entry_t *new)

So why do you drop the "atomic_" prefix here?

Also the second parameter could be "const"...

Jan

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-16  8:40                     ` Jan Beulich
@ 2013-12-16 23:27                       ` Mukesh Rathor
  2013-12-16 23:44                         ` Julien Grall
                                           ` (2 more replies)
  0 siblings, 3 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-16 23:27 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Xen-devel, Ian Campbell, george.dunlap, Tim Deegan, Julien Grall,
	eddie.dong, keir.xen, jun.nakajima

On Mon, 16 Dec 2013 08:40:41 +0000
"Jan Beulich" <JBeulich@suse.com> wrote:

> >>> On 14.12.13 at 03:48, Mukesh Rathor <mukesh.rathor@oracle.com>
> >>> wrote:
> >> Also, Jan may have an opinion about whether a teardown operation
> >> that has to walk each p2m entry would have to be made
> >> preemptible.  I'm not sure where we draw the line on such things.
> > 
> > Since at present teardown cleanup of foreign is not really that
> > important as its only applicable to dom0, let me submit another
> > patch for it on Mon with few ideas. That would also keep this patch
> > size reasonable, and keep you from having to look at the same code
> > over and over. 
> > 
> > So, please take a look at the version below with above two fixes. If
> > you approve it, i can resubmit the entire series rebased to latest 
> > with your ack on Monday, and the series can go in while we resolve
> > the p2m teardown.
> 
> Going through the patch again, I'm not seeing any loop being
> added. Am I missing something here?

Yes. Since the destruction of p2m leaking foreign pages only applies
to the control domain being destroyed, I don't think it is that critical
that that part gets into 4.4. So, I'm submitting a separate patch for it,
as said above.

> > --- a/xen/arch/x86/mm/p2m-ept.c
> > +++ b/xen/arch/x86/mm/p2m-ept.c
> > @@ -36,8 +36,6 @@
> >  
> >  #define
> > atomic_read_ept_entry(__pepte)                              \
> > ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
> > -#define atomic_write_ept_entry(__pepte,
> > __epte)                     \
> > -    write_atomic(&(__pepte)->epte, (__epte).epte)
> >  
> >  #define is_epte_present(ept_entry)      ((ept_entry)->epte & 0x7)
> >  #define is_epte_superpage(ept_entry)    ((ept_entry)->sp)
> > @@ -46,6 +44,25 @@ static inline bool_t is_epte_valid(ept_entry_t
> > *e) return (e->epte != 0 && e->sa_p2mt != p2m_invalid);
> >  }
> >  
> > +static inline void write_ept_entry(ept_entry_t *entryptr,
> > ept_entry_t *new)
> 
> So why do you drop the "atomic_" prefix here?

To distinguish it from the older atomic_* macro which did nothing but
atomically write the entry. But if it helps get your approval, I added
atomic prefix. 

> Also the second parameter could be "const"...

Ok.

Final version below:

thanks
Mukesh
---------------------

In this patch, a new function, p2m_add_foreign(), is added
to map pages from foreign guest into current dom0 for domU creation.
Such pages are typed p2m_map_foreign. Another function
p2m_remove_foreign() is added to remove such pages. Note, it is the
nature of such pages that a refcnt is held during their stay in the p2m.
The refcnt is added and released in the low level ept code for convenience.
The cleanup of foreign pages from p2m upon its destruction is submitted
subsequently under a separate patch.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/mm.c         |   23 +++++++---
 xen/arch/x86/mm/p2m-ept.c |   30 +++++++++++---
 xen/arch/x86/mm/p2m-pt.c  |    9 ++++-
 xen/arch/x86/mm/p2m.c     |   96 +++++++++++++++++++++++++++++++++++++++++++++
 xen/common/memory.c       |   12 +++++-
 xen/include/asm-arm/p2m.h |    8 +++-
 xen/include/asm-x86/p2m.h |    7 +++
 7 files changed, 169 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e3da479..6c2edc4 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2810,7 +2810,7 @@ static struct domain *get_pg_owner(domid_t domid)
         goto out;
     }
 
-    if ( unlikely(paging_mode_translate(curr)) )
+    if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
     {
         MEM_LOG("Cannot mix foreign mappings with translated domains");
         goto out;
@@ -4522,7 +4522,8 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
 
 static int xenmem_add_to_physmap_once(
     struct domain *d,
-    const struct xen_add_to_physmap *xatp)
+    const struct xen_add_to_physmap *xatp,
+    struct domain *fdom)
 {
     struct page_info *page = NULL;
     unsigned long gfn = 0; /* gcc ... */
@@ -4581,6 +4582,13 @@ static int xenmem_add_to_physmap_once(
             page = mfn_to_page(mfn);
             break;
         }
+
+        case XENMAPSPACE_gmfn_foreign:
+        {
+            rc = p2m_add_foreign(d, xatp->idx, xatp->gpfn, fdom);
+            return rc;
+        }
+
         default:
             break;
     }
@@ -4646,7 +4654,7 @@ static int xenmem_add_to_physmap(struct domain *d,
         start_xatp = *xatp;
         while ( xatp->size > 0 )
         {
-            rc = xenmem_add_to_physmap_once(d, xatp);
+            rc = xenmem_add_to_physmap_once(d, xatp, NULL);
             if ( rc < 0 )
                 return rc;
 
@@ -4672,11 +4680,12 @@ static int xenmem_add_to_physmap(struct domain *d,
         return rc;
     }
 
-    return xenmem_add_to_physmap_once(d, xatp);
+    return xenmem_add_to_physmap_once(d, xatp, NULL);
 }
 
 static int xenmem_add_to_physmap_range(struct domain *d,
-                                       struct xen_add_to_physmap_range *xatpr)
+                                       struct xen_add_to_physmap_range *xatpr,
+                                       struct domain *fdom)
 {
     /* Process entries in reverse order to allow continuations */
     while ( xatpr->size > 0 )
@@ -4693,7 +4702,7 @@ static int xenmem_add_to_physmap_range(struct domain *d,
         xatp.space = xatpr->space;
         xatp.idx = idx;
         xatp.gpfn = gpfn;
-        rc = xenmem_add_to_physmap_once(d, &xatp);
+        rc = xenmem_add_to_physmap_once(d, &xatp, fdom);
 
         if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
             return -EFAULT;
@@ -4780,7 +4789,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         }
         rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
         if ( rc == 0 )
-            rc = xenmem_add_to_physmap_range(d, &xatpr);
+            rc = xenmem_add_to_physmap_range(d, &xatpr, fd);
 
         rcu_unlock_domain(d);
         if ( fd )
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 08d1d72..0ba2365 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -36,8 +36,6 @@
 
 #define atomic_read_ept_entry(__pepte)                              \
     ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
-#define atomic_write_ept_entry(__pepte, __epte)                     \
-    write_atomic(&(__pepte)->epte, (__epte).epte)
 
 #define is_epte_present(ept_entry)      ((ept_entry)->epte & 0x7)
 #define is_epte_superpage(ept_entry)    ((ept_entry)->sp)
@@ -46,6 +44,26 @@ static inline bool_t is_epte_valid(ept_entry_t *e)
     return (e->epte != 0 && e->sa_p2mt != p2m_invalid);
 }
 
+static inline void atomic_write_ept_entry(ept_entry_t *entryptr,
+                                          const ept_entry_t *new)
+{
+    if ( p2m_is_foreign(new->sa_p2mt) )
+    {
+        struct page_info *page;
+        struct domain *fdom;
+
+        ASSERT(mfn_valid(new->mfn));
+        page = mfn_to_page(new->mfn);
+        fdom = page_get_owner(page);
+        get_page(page, fdom);
+    }
+    if ( p2m_is_foreign(entryptr->sa_p2mt) )
+        put_page(mfn_to_page(entryptr->mfn));
+
+    write_atomic(&entryptr->epte, new->epte);
+}
+
+
 static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type, p2m_access_t access)
 {
     /* First apply type permissions */
@@ -378,7 +396,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
             ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
         }
 
-        atomic_write_ept_entry(ept_entry, new_entry);
+        atomic_write_ept_entry(ept_entry, &new_entry);
     }
     else
     {
@@ -398,7 +416,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
 
         /* now install the newly split ept sub-tree */
         /* NB: please make sure domian is paused and no in-fly VT-d DMA. */
-        atomic_write_ept_entry(ept_entry, split_ept_entry);
+        atomic_write_ept_entry(ept_entry, &split_ept_entry);
 
         /* then move to the level we want to make real changes */
         for ( ; i > target; i-- )
@@ -428,7 +446,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
 
         ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
 
-        atomic_write_ept_entry(ept_entry, new_entry);
+        atomic_write_ept_entry(ept_entry, &new_entry);
     }
 
     /* Track the highest gfn for which we have ever had a valid mapping */
@@ -665,7 +683,7 @@ static void ept_change_entry_type_page(mfn_t ept_page_mfn, int ept_page_level,
 
             e.sa_p2mt = nt;
             ept_p2m_type_to_flags(&e, nt, e.access);
-            atomic_write_ept_entry(&epte[i], e);
+            atomic_write_ept_entry(&epte[i], &e);
         }
     }
 
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 09b60ce..766fd67 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -276,7 +276,7 @@ p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table,
     return 1;
 }
 
-// Returns 0 on error (out of memory)
+// Returns 0 on error (out of memory, or unimplemented p2m type)
 static int
 p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
               unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
@@ -312,6 +312,13 @@ p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
     }
 
+    if ( p2m_is_foreign(p2mt) )
+    {
+        /* pvh fixme: foreign types are only supported on ept at present */
+        gdprintk(XENLOG_WARNING, "Unimplemented foreign p2m type.\n");
+        return 0;
+    }
+
     if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
                          L4_PAGETABLE_SHIFT - PAGE_SHIFT,
                          L4_PAGETABLE_ENTRIES, PGT_l3_page_table) )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 0659ef1..441d151 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1741,6 +1741,102 @@ out_p2m_audit:
 #endif /* P2M_AUDIT */
 
 /*
+ * Add frames from foreign domain to target domain's physmap. Similar to
+ * XENMAPSPACE_gmfn but the frame is foreign being mapped into current,
+ * and is not removed from foreign domain.
+ * Usage: libxl on pvh dom0 creating a guest and doing privcmd_ioctl_mmap.
+ * Side Effect: the mfn for fgfn will be refcounted in lower level routines
+ *              so it is not lost while mapped here. The refcnt is released
+ *              via the p2m_remove_foreign path.
+ * Returns: 0 ==> success
+ */
+int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
+                    unsigned long gpfn, struct domain *fdom)
+{
+    p2m_type_t p2mt, p2mt_prev;
+    mfn_t prev_mfn, mfn;
+    struct page_info *page;
+    int rc = 0;
+
+    if ( tdom == fdom || !tdom || !fdom || !is_pvh_domain(tdom) )
+        return -EINVAL;
+
+    /* following will take a refcnt on the mfn */
+    page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
+    if ( !page || !p2m_is_valid(p2mt) )
+    {
+        if ( page )
+            put_page(page);
+        return -EINVAL;
+    }
+    mfn = page_to_mfn(page);
+
+    /* Remove previously mapped page if it is present. */
+    prev_mfn = get_gfn(tdom, gpfn, &p2mt_prev);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(mfn_x(prev_mfn)) )
+            /* Xen heap frames are simply unhooked from this phys slot */
+            guest_physmap_remove_page(tdom, gpfn, mfn_x(prev_mfn), 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(tdom, gpfn);
+    }
+    /*
+     * Create the new mapping. Can't use guest_physmap_add_page() because it
+     * will update the m2p table which will result in  mfn -> gpfn of dom0
+     * and not fgfn of domU.
+     */
+    if ( set_foreign_p2m_entry(tdom, gpfn, mfn) == 0 )
+    {
+        gdprintk(XENLOG_WARNING, "set_foreign_p2m_entry failed. "
+                 "gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
+                 gpfn, mfn_x(mfn), fgfn, tdom->domain_id, fdom->domain_id);
+        rc = -EINVAL;
+    }
+    put_page(page);
+
+    /*
+     * We must do this put_gfn after set_foreign_p2m_entry so another cpu
+     * doesn't populate the gpfn before us.
+     */
+    put_gfn(tdom, gpfn);
+
+    return rc;
+}
+
+int p2m_remove_foreign(struct domain *d, unsigned long gpfn)
+{
+    mfn_t mfn;
+    p2m_type_t p2mt;
+    struct domain *foreign_dom;
+
+    ASSERT(is_pvh_domain(d));
+
+    mfn = get_gfn_query(d, gpfn, &p2mt);
+    if ( unlikely(!p2m_is_foreign(p2mt)) )
+    {
+        put_gfn(d, gpfn);
+        gdprintk(XENLOG_WARNING, "Invalid type for gpfn:%lx domid:%d t:%d\n",
+                 gpfn, d->domain_id, p2mt);
+        return -EINVAL;
+    }
+    if ( unlikely(!mfn_valid(mfn)) )
+    {
+        put_gfn(d, gpfn);
+        gdprintk(XENLOG_WARNING, "Invalid mfn for gpfn:%lx domid:%d\n",
+                 gpfn, d->domain_id);
+        return -EINVAL;
+    }
+
+    foreign_dom = page_get_owner(mfn_to_page(mfn));
+    ASSERT(d != foreign_dom);
+
+    guest_physmap_remove_page(d, gpfn, mfn_x(mfn), 0);
+    put_gfn(d, gpfn);
+    return 0;
+}
+/*
  * Local variables:
  * mode: C
  * c-file-style: "BSD"
diff --git a/xen/common/memory.c b/xen/common/memory.c
index eb7b72b..53f67c1 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -678,6 +678,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         struct xen_remove_from_physmap xrfp;
         struct page_info *page;
         struct domain *d;
+        p2m_type_t p2mt = INT_MAX;
 
         if ( copy_from_guest(&xrfp, arg, 1) )
             return -EFAULT;
@@ -693,12 +694,21 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             return rc;
         }
 
-        page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
+        /*
+         * If autotranslate guest, (eg pvh), the gfn could be mapped to a mfn
+         * from foreign domain by the user space tool during domain creation.
+         * We need to check for that, free it up from the p2m, and release
+         * refcnt on it. In such a case, page would be NULL and the following
+         * call would not have refcnt'd the page.
+         */
+        page = get_page_from_gfn(d, xrfp.gpfn, &p2mt, P2M_ALLOC);
         if ( page )
         {
             guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
             put_page(page);
         }
+        else if ( p2m_is_foreign(p2mt) )
+            rc = p2m_remove_foreign(d, xrfp.gpfn);
         else
             rc = -ENOENT;
 
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index c660820..a664950 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -83,7 +83,7 @@ static inline struct page_info *get_page_from_gfn(
     struct page_info *page;
     unsigned long mfn = gmfn_to_mfn(d, gfn);
 
-    ASSERT(t == NULL);
+    ASSERT(!t || *t == INT_MAX);
 
     if (!mfn_valid(mfn))
         return NULL;
@@ -110,6 +110,12 @@ static inline int get_page_and_type(struct page_info *page,
     return rc;
 }
 
+#define p2m_is_foreign(_t) (0 && (_t))
+static inline int p2m_remove_foreign(struct domain *d, unsigned long gpfn)
+{
+    return -ENOSYS;
+}
+
 #endif /* _XEN_P2M_H */
 
 /*
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 6fc71a1..6371705 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -515,6 +515,13 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
 /* Set foreign mfn in the current guest's p2m table. */
 int set_foreign_p2m_entry(struct domain *domp, unsigned long gfn, mfn_t mfn);
 
+/* Add foreign mapping to the guest's p2m table. */
+int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
+                    unsigned long gpfn, struct domain *fdom);
+
+/* Remove foreign mapping from the guest's p2m table. */
+int p2m_remove_foreign(struct domain *d, unsigned long gpfn);
+
 /* 
  * Populate-on-demand
  */
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-16 23:27                       ` Mukesh Rathor
@ 2013-12-16 23:44                         ` Julien Grall
  2013-12-17  1:51                           ` Mukesh Rathor
  2013-12-17  2:33                         ` Mukesh Rathor
  2013-12-17 10:10                         ` Tim Deegan
  2 siblings, 1 reply; 46+ messages in thread
From: Julien Grall @ 2013-12-16 23:44 UTC (permalink / raw)
  To: Mukesh Rathor, Jan Beulich
  Cc: Xen-devel, Ian Campbell, george.dunlap, Tim Deegan, eddie.dong,
	keir.xen, jun.nakajima



On 12/16/2013 11:27 PM, Mukesh Rathor wrote:
> On Mon, 16 Dec 2013 08:40:41 +0000
> "Jan Beulich" <JBeulich@suse.com> wrote:
>
>>>>> On 14.12.13 at 03:48, Mukesh Rathor <mukesh.rathor@oracle.com>
>>>>> wrote:
>>>> Also, Jan may have an opinion about whether a teardown operation
>>>> that has to walk each p2m entry would have to be made
>>>> preemptible.  I'm not sure where we draw the line on such things.
>>>
>>> Since at present teardown cleanup of foreign is not really that
>>> important as its only applicable to dom0, let me submit another
>>> patch for it on Mon with few ideas. That would also keep this patch
>>> size reasonable, and keep you from having to look at the same code
>>> over and over.
>>>
>>> So, please take a look at the version below with above two fixes. If
>>> you approve it, i can resubmit the entire series rebased to latest
>>> with your ack on Monday, and the series can go in while we resolve
>>> the p2m teardown.
>>
>> Going through the patch again, I'm not seeing any loop being
>> added. Am I missing something here?
>
> Yes. Since the destruction of p2m leaking foreign pages only applies
> to control domain being destroyed, i don't think it is that critical
> that part get into 4.4. So, I'm submitting a separate patch for it,
> like said above.
>
>>> --- a/xen/arch/x86/mm/p2m-ept.c
>>> +++ b/xen/arch/x86/mm/p2m-ept.c
>>> @@ -36,8 +36,6 @@
>>>
>>>   #define
>>> atomic_read_ept_entry(__pepte)                              \
>>> ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
>>> -#define atomic_write_ept_entry(__pepte,
>>> __epte)                     \
>>> -    write_atomic(&(__pepte)->epte, (__epte).epte)
>>>
>>>   #define is_epte_present(ept_entry)      ((ept_entry)->epte & 0x7)
>>>   #define is_epte_superpage(ept_entry)    ((ept_entry)->sp)
>>> @@ -46,6 +44,25 @@ static inline bool_t is_epte_valid(ept_entry_t
>>> *e) return (e->epte != 0 && e->sa_p2mt != p2m_invalid);
>>>   }
>>>
>>> +static inline void write_ept_entry(ept_entry_t *entryptr,
>>> ept_entry_t *new)
>>
>> So why do you drop the "atomic_" prefix here?
>
> To distinguish it from the older atomic_* macro which did nothing but
> atomically write the entry. But if it helps get your approval, I added
> atomic prefix.
>
>> Also the second parameter could be "const"...
>
> Ok.
>
> Final version below:
>
> thanks
> Mukesh
> ---------------------
>
> In this patch, a new function, p2m_add_foreign(), is added
> to map pages from foreign guest into current dom0 for domU creation.
> Such pages are typed p2m_map_foreign. Another function
> p2m_remove_foreign() is added to remove such pages. Note, it is the
> nature of such pages that a refcnt is held during their stay in the p2m.
> The refcnt is added and released in the low level ept code for convenience.
> The cleanup of foreign pages from p2m upon it's destruction, is submitted
> subsequently under a separate patch.
>
> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
> ---
>   xen/arch/x86/mm.c         |   23 +++++++---
>   xen/arch/x86/mm/p2m-ept.c |   30 +++++++++++---
>   xen/arch/x86/mm/p2m-pt.c  |    9 ++++-
>   xen/arch/x86/mm/p2m.c     |   96 +++++++++++++++++++++++++++++++++++++++++++++
>   xen/common/memory.c       |   12 +++++-
>   xen/include/asm-arm/p2m.h |    8 +++-
>   xen/include/asm-x86/p2m.h |    7 +++
>   7 files changed, 169 insertions(+), 16 deletions(-)

Following the discussion we had on ARM thread (see 
https://patches.linaro.org/22361/), the approach is to remove specific 
patch for p2m foreign on common code. So get_page_from_gfn must handle 
reference on foreign mapping.

The code is pretty simple on ARM, see: https://patches.linaro.org/22536/
and I don't see why this kind of modification can't go on x86 part.

Also, can you remove all ARM specific code in this patch?

Cheers,

-- 
Julien Grall

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-16 23:44                         ` Julien Grall
@ 2013-12-17  1:51                           ` Mukesh Rathor
  0 siblings, 0 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-17  1:51 UTC (permalink / raw)
  To: Julien Grall
  Cc: jun.nakajima, Xen-devel, Ian Campbell, george.dunlap, Tim Deegan,
	eddie.dong, keir.xen, Jan Beulich

On Mon, 16 Dec 2013 23:44:37 +0000
Julien Grall <julien.grall@linaro.org> wrote:

> 
> 
> On 12/16/2013 11:27 PM, Mukesh Rathor wrote:
> > On Mon, 16 Dec 2013 08:40:41 +0000
> > "Jan Beulich" <JBeulich@suse.com> wrote:
> >
> >>>>> On 14.12.13 at 03:48, Mukesh Rathor <mukesh.rathor@oracle.com>
> >>>>> wrote:
> >>>> Also, Jan may have an opinion about whether a teardown operation
> >>>> that has to walk each p2m entry would have to be made
> >>>> preemptible.  I'm not sure where we draw the line on such things.
> >>>
> >>> Since at present teardown cleanup of foreign is not really that
> >>> important as its only applicable to dom0, let me submit another
> >>> patch for it on Mon with few ideas. That would also keep this
> >>> patch size reasonable, and keep you from having to look at the
> >>> same code over and over.
> >>>
> >>> So, please take a look at the version below with above two fixes.
> >>> If you approve it, i can resubmit the entire series rebased to
> >>> latest with your ack on Monday, and the series can go in while we
> >>> resolve the p2m teardown.
> >>
> >> Going through the patch again, I'm not seeing any loop being
> >> added. Am I missing something here?
> >
> > Yes. Since the destruction of p2m leaking foreign pages only applies
> > to control domain being destroyed, i don't think it is that critical
> > that part get into 4.4. So, I'm submitting a separate patch for it,
> > like said above.
> >
> >>> --- a/xen/arch/x86/mm/p2m-ept.c
> >>> +++ b/xen/arch/x86/mm/p2m-ept.c
> >>> @@ -36,8 +36,6 @@
> >>>
> >>>   #define
> >>> atomic_read_ept_entry(__pepte)                              \
> >>> ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
> >>> -#define atomic_write_ept_entry(__pepte,
> >>> __epte)                     \
> >>> -    write_atomic(&(__pepte)->epte, (__epte).epte)
> >>>
> >>>   #define is_epte_present(ept_entry)      ((ept_entry)->epte &
> >>> 0x7) #define is_epte_superpage(ept_entry)    ((ept_entry)->sp)
> >>> @@ -46,6 +44,25 @@ static inline bool_t is_epte_valid(ept_entry_t
> >>> *e) return (e->epte != 0 && e->sa_p2mt != p2m_invalid);
> >>>   }
> >>>
> >>> +static inline void write_ept_entry(ept_entry_t *entryptr,
> >>> ept_entry_t *new)
> >>
> >> So why do you drop the "atomic_" prefix here?
> >
> > To distinguish it from the older atomic_* macro which did nothing
> > but atomically write the entry. But if it helps get your approval,
> > I added atomic prefix.
> >
> >> Also the second parameter could be "const"...
> >
> > Ok.
> >
> > Final version below:
> >
> > thanks
> > Mukesh
> > ---------------------
> >
> > In this patch, a new function, p2m_add_foreign(), is added
> > to map pages from foreign guest into current dom0 for domU creation.
> > Such pages are typed p2m_map_foreign. Another function
> > p2m_remove_foreign() is added to remove such pages. Note, it is the
> > nature of such pages that a refcnt is held during their stay in the
> > p2m. The refcnt is added and released in the low level ept code for
> > convenience. The cleanup of foreign pages from p2m upon it's
> > destruction, is submitted subsequently under a separate patch.
> >
> > Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
> > ---
> >   xen/arch/x86/mm.c         |   23 +++++++---
> >   xen/arch/x86/mm/p2m-ept.c |   30 +++++++++++---
> >   xen/arch/x86/mm/p2m-pt.c  |    9 ++++-
> >   xen/arch/x86/mm/p2m.c     |   96
> > +++++++++++++++++++++++++++++++++++++++++++++
> > xen/common/memory.c       |   12 +++++- xen/include/asm-arm/p2m.h
> > |    8 +++- xen/include/asm-x86/p2m.h |    7 +++
> >   7 files changed, 169 insertions(+), 16 deletions(-)
> 
> Following the discussion we had on ARM thread (see 
> https://patches.linaro.org/22361/), the approach is to remove
> specific patch for p2m foreign on common code. So get_page_from_gfn
> must handle reference on foreign mapping.

Fine, I'll just clean that up and move it to guest_physmap.... it's getting
a bit frustrating now.....

> The code is pretty simple on ARM, see:
> https://patches.linaro.org/22536/ and I don't see why this kind of
> modification can't go on x86 part.

It can, but previously it was not deemed necessary. 

> Also, can you remove all ARM specific code in this patch?

Not until your stuff is checked in.

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-16 23:27                       ` Mukesh Rathor
  2013-12-16 23:44                         ` Julien Grall
@ 2013-12-17  2:33                         ` Mukesh Rathor
  2013-12-17 10:10                         ` Tim Deegan
  2 siblings, 0 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-17  2:33 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: jun.nakajima, Xen-devel, Ian Campbell, george.dunlap, eddie.dong,
	Julien Grall, Tim Deegan, keir.xen, Jan Beulich

On Mon, 16 Dec 2013 15:27:22 -0800
Mukesh Rathor <mukesh.rathor@oracle.com> wrote:

> On Mon, 16 Dec 2013 08:40:41 +0000
> "Jan Beulich" <JBeulich@suse.com> wrote:
> 
> > >>> On 14.12.13 at 03:48, Mukesh Rathor <mukesh.rathor@oracle.com>
> > >>> wrote:
> > >> Also, Jan may have an opinion about whether a teardown operation
> > >> that has to walk each p2m entry would have to be made
> > >> preemptible.  I'm not sure where we draw the line on such things.
> > > 
> > > Since at present teardown cleanup of foreign is not really that
> > > important as its only applicable to dom0, let me submit another
> > > patch for it on Mon with few ideas. That would also keep this
> > > patch size reasonable, and keep you from having to look at the
> > > same code over and over. 
> > > 
> > > So, please take a look at the version below with above two fixes.
> > > If you approve it, i can resubmit the entire series rebased to
> > > latest with your ack on Monday, and the series can go in while we
> > > resolve the p2m teardown.
> > 
> > Going through the patch again, I'm not seeing any loop being
> > added. Am I missing something here?
> 
> Yes. Since the destruction of p2m leaking foreign pages only applies
> to control domain being destroyed, i don't think it is that critical
> that part get into 4.4. So, I'm submitting a separate patch for it,
> like said above.
> 
> > > --- a/xen/arch/x86/mm/p2m-ept.c
> > > +++ b/xen/arch/x86/mm/p2m-ept.c
> > > @@ -36,8 +36,6 @@
> > >  
> > >  #define
> > > atomic_read_ept_entry(__pepte)                              \
> > > ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
> > > -#define atomic_write_ept_entry(__pepte,
> > > __epte)                     \
> > > -    write_atomic(&(__pepte)->epte, (__epte).epte)
> > >  
> > >  #define is_epte_present(ept_entry)      ((ept_entry)->epte & 0x7)
> > >  #define is_epte_superpage(ept_entry)    ((ept_entry)->sp)
> > > @@ -46,6 +44,25 @@ static inline bool_t is_epte_valid(ept_entry_t
> > > *e) return (e->epte != 0 && e->sa_p2mt != p2m_invalid);
> > >  }
> > >  
> > > +static inline void write_ept_entry(ept_entry_t *entryptr,
> > > ept_entry_t *new)
> > 
> > So why do you drop the "atomic_" prefix here?
> 
> To distinguish it from the older atomic_* macro which did nothing but
> atomically write the entry. But if it helps get your approval, I added
> atomic prefix. 
> 
> > Also the second parameter could be "const"...
> 
> Ok.
> 
> Final version below:

Please ignore this, sending V7 with changes to this again after 
comments from Julien.

thanks
mukesh

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-16 23:27                       ` Mukesh Rathor
  2013-12-16 23:44                         ` Julien Grall
  2013-12-17  2:33                         ` Mukesh Rathor
@ 2013-12-17 10:10                         ` Tim Deegan
  2013-12-17 23:24                           ` Mukesh Rathor
  2013-12-18  2:34                           ` Mukesh Rathor
  2 siblings, 2 replies; 46+ messages in thread
From: Tim Deegan @ 2013-12-17 10:10 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: jun.nakajima, Xen-devel, Ian Campbell, george.dunlap,
	Julien Grall, eddie.dong, keir.xen, Jan Beulich

At 15:27 -0800 on 16 Dec (1387204042), Mukesh Rathor wrote:
> On Mon, 16 Dec 2013 08:40:41 +0000
> "Jan Beulich" <JBeulich@suse.com> wrote:
> 
> > >>> On 14.12.13 at 03:48, Mukesh Rathor <mukesh.rathor@oracle.com>
> > >>> wrote:
> > >> Also, Jan may have an opinion about whether a teardown operation
> > >> that has to walk each p2m entry would have to be made
> > >> preemptible.  I'm not sure where we draw the line on such things.
> > > 
> > > Since at present teardown cleanup of foreign is not really that
> > > important as its only applicable to dom0, let me submit another
> > > patch for it on Mon with few ideas. That would also keep this patch
> > > size reasonable, and keep you from having to look at the same code
> > > over and over. 
> > > 
> > > So, please take a look at the version below with above two fixes. If
> > > you approve it, i can resubmit the entire series rebased to latest 
> > > with your ack on Monday, and the series can go in while we resolve
> > > the p2m teardown.
> > 
> > Going through the patch again, I'm not seeing any loop being
> > added. Am I missing something here?
> 
> Yes. Since the destruction of p2m leaking foreign pages only applies
> to control domain being destroyed, i don't think it is that critical
> that part get into 4.4.

Nak.  We can't let the tools break the refcounting.

Tim.

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-17 10:10                         ` Tim Deegan
@ 2013-12-17 23:24                           ` Mukesh Rathor
  2013-12-18  2:34                           ` Mukesh Rathor
  1 sibling, 0 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-17 23:24 UTC (permalink / raw)
  To: Tim Deegan
  Cc: jun.nakajima, Xen-devel, Ian Campbell, george.dunlap,
	Julien Grall, eddie.dong, keir.xen, Jan Beulich

On Tue, 17 Dec 2013 11:10:46 +0100
Tim Deegan <tim@xen.org> wrote:

> At 15:27 -0800 on 16 Dec (1387204042), Mukesh Rathor wrote:
> > On Mon, 16 Dec 2013 08:40:41 +0000
> > "Jan Beulich" <JBeulich@suse.com> wrote:
> > 
> > > >>> On 14.12.13 at 03:48, Mukesh Rathor <mukesh.rathor@oracle.com>
> > > >>> wrote:
> > > >> Also, Jan may have an opinion about whether a teardown
> > > >> operation that has to walk each p2m entry would have to be made
> > > >> preemptible.  I'm not sure where we draw the line on such
> > > >> things.
> > > > 
> > > > Since at present teardown cleanup of foreign is not really that
> > > > important as its only applicable to dom0, let me submit another
> > > > patch for it on Mon with few ideas. That would also keep this
> > > > patch size reasonable, and keep you from having to look at the
> > > > same code over and over. 
> > > > 
> > > > So, please take a look at the version below with above two
> > > > fixes. If you approve it, i can resubmit the entire series
> > > > rebased to latest with your ack on Monday, and the series can
> > > > go in while we resolve the p2m teardown.
> > > 
> > > Going through the patch again, I'm not seeing any loop being
> > > added. Am I missing something here?
> > 
> > Yes. Since the destruction of p2m leaking foreign pages only applies
> > to control domain being destroyed, i don't think it is that critical
> > that part get into 4.4.
> 
> Nak.  We can't let the tools break the refcounting.
> 
> Tim.

Nak the nak. The issue here is control domain dying, not what the
tool stack did prior to that.

Mukesh

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-17 10:10                         ` Tim Deegan
  2013-12-17 23:24                           ` Mukesh Rathor
@ 2013-12-18  2:34                           ` Mukesh Rathor
  2013-12-18  9:51                             ` Jan Beulich
  2013-12-18  9:53                             ` Tim Deegan
  1 sibling, 2 replies; 46+ messages in thread
From: Mukesh Rathor @ 2013-12-18  2:34 UTC (permalink / raw)
  To: Tim Deegan
  Cc: jun.nakajima, Xen-devel, Ian Campbell, george.dunlap,
	Julien Grall, eddie.dong, keir.xen, Jan Beulich

On Tue, 17 Dec 2013 11:10:46 +0100
Tim Deegan <tim@xen.org> wrote:

> At 15:27 -0800 on 16 Dec (1387204042), Mukesh Rathor wrote:
> > On Mon, 16 Dec 2013 08:40:41 +0000
> > "Jan Beulich" <JBeulich@suse.com> wrote:
> > 
> > > >>> On 14.12.13 at 03:48, Mukesh Rathor <mukesh.rathor@oracle.com>
> > > >>> wrote:
> > > >> Also, Jan may have an opinion about whether a teardown
> > > >> operation that has to walk each p2m entry would have to be made
> > > >> preemptible.  I'm not sure where we draw the line on such
> > > >> things.
> > > > 
> > > > Since at present teardown cleanup of foreign is not really that
> > > > important as its only applicable to dom0, let me submit another
> > > > patch for it on Mon with few ideas. That would also keep this
> > > > patch size reasonable, and keep you from having to look at the
> > > > same code over and over. 
> > > > 
> > > > So, please take a look at the version below with above two
> > > > fixes. If you approve it, i can resubmit the entire series
> > > > rebased to latest with your ack on Monday, and the series can
> > > > go in while we resolve the p2m teardown.
> > > 
> > > Going through the patch again, I'm not seeing any loop being
> > > added. Am I missing something here?
> > 
> > Yes. Since the destruction of p2m leaking foreign pages only applies
> > to control domain being destroyed, i don't think it is that critical
> > that part get into 4.4.
> 
> Nak.  We can't let the tools break the refcounting.

BTW, it appears that currently this cleanup doesn't happen on PV. If the
tool stack on a PV control domain has mapped foreign pages and that control
domain then dies, the refcnt will be messed up. Right?

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-18  2:34                           ` Mukesh Rathor
@ 2013-12-18  9:51                             ` Jan Beulich
  2013-12-18  9:53                             ` Tim Deegan
  1 sibling, 0 replies; 46+ messages in thread
From: Jan Beulich @ 2013-12-18  9:51 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: Xen-devel, Ian Campbell, george.dunlap, Tim Deegan, Julien Grall,
	eddie.dong, keir.xen, jun.nakajima

>>> On 18.12.13 at 03:34, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> On Tue, 17 Dec 2013 11:10:46 +0100
> Tim Deegan <tim@xen.org> wrote:
> 
>> At 15:27 -0800 on 16 Dec (1387204042), Mukesh Rathor wrote:
>> > On Mon, 16 Dec 2013 08:40:41 +0000
>> > "Jan Beulich" <JBeulich@suse.com> wrote:
>> > 
>> > > >>> On 14.12.13 at 03:48, Mukesh Rathor <mukesh.rathor@oracle.com>
>> > > >>> wrote:
>> > > >> Also, Jan may have an opinion about whether a teardown
>> > > >> operation that has to walk each p2m entry would have to be made
>> > > >> preemptible.  I'm not sure where we draw the line on such
>> > > >> things.
>> > > > 
>> > > > Since at present teardown cleanup of foreign is not really that
>> > > > important as its only applicable to dom0, let me submit another
>> > > > patch for it on Mon with few ideas. That would also keep this
>> > > > patch size reasonable, and keep you from having to look at the
>> > > > same code over and over. 
>> > > > 
>> > > > So, please take a look at the version below with above two
>> > > > fixes. If you approve it, i can resubmit the entire series
>> > > > rebased to latest with your ack on Monday, and the series can
>> > > > go in while we resolve the p2m teardown.
>> > > 
>> > > Going through the patch again, I'm not seeing any loop being
>> > > added. Am I missing something here?
>> > 
>> > Yes. Since the destruction of p2m leaking foreign pages only applies
>> > to control domain being destroyed, i don't think it is that critical
>> > that part get into 4.4.
>> 
>> Nak.  We can't let the tools break the refcounting.
> 
> BTW, it appears that currently this cleanup doesn't happen on PV. If on a 
> PV control domain and the tool stack has mapped foreign pages, and the 
> PV control domain dies, the refcnt will be messed up. Right?

I don't think so - the refcounts should all get properly dropped
as page tables get cleaned up.

Jan

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [V6 PATCH 6.2/7] pvh dom0: Add and remove foreign pages
  2013-12-18  2:34                           ` Mukesh Rathor
  2013-12-18  9:51                             ` Jan Beulich
@ 2013-12-18  9:53                             ` Tim Deegan
  1 sibling, 0 replies; 46+ messages in thread
From: Tim Deegan @ 2013-12-18  9:53 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: jun.nakajima, Xen-devel, Ian Campbell, george.dunlap,
	Julien Grall, eddie.dong, keir.xen, Jan Beulich

At 18:34 -0800 on 17 Dec (1387301666), Mukesh Rathor wrote:
> On Tue, 17 Dec 2013 11:10:46 +0100
> Tim Deegan <tim@xen.org> wrote:
> 
> > At 15:27 -0800 on 16 Dec (1387204042), Mukesh Rathor wrote:
> > > On Mon, 16 Dec 2013 08:40:41 +0000
> > > "Jan Beulich" <JBeulich@suse.com> wrote:
> > > 
> > > > >>> On 14.12.13 at 03:48, Mukesh Rathor <mukesh.rathor@oracle.com>
> > > > >>> wrote:
> > > > >> Also, Jan may have an opinion about whether a teardown
> > > > >> operation that has to walk each p2m entry would have to be made
> > > > >> preemptible.  I'm not sure where we draw the line on such
> > > > >> things.
> > > > > 
> > > > > Since at present teardown cleanup of foreign is not really that
> > > > > important as its only applicable to dom0, let me submit another
> > > > > patch for it on Mon with few ideas. That would also keep this
> > > > > patch size reasonable, and keep you from having to look at the
> > > > > same code over and over. 
> > > > > 
> > > > > So, please take a look at the version below with above two
> > > > > fixes. If you approve it, i can resubmit the entire series
> > > > > rebased to latest with your ack on Monday, and the series can
> > > > > go in while we resolve the p2m teardown.
> > > > 
> > > > Going through the patch again, I'm not seeing any loop being
> > > > added. Am I missing something here?
> > > 
> > > Yes. Since the destruction of p2m leaking foreign pages only applies
> > > to control domain being destroyed, i don't think it is that critical
> > > that part get into 4.4.
> > 
> > Nak.  We can't let the tools break the refcounting.
> 
> BTW, it appears that currently this cleanup doesn't happen on PV. If on a 
> PV control domain and the tool stack has mapped foreign pages, and the 
> PV control domain dies, the refcnt will be messed up. Right?

Not at all.  PV domains' foreign mappings are refcounted through their
pagetables, which are unwound on domain destruction -- otherwise even
their local mappings would mess up the refcounts!

Tim.

^ permalink raw reply	[flat|nested] 46+ messages in thread

end of thread, other threads:[~2013-12-18  9:53 UTC | newest]

Thread overview: 46+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-12-06  2:38 [V6 PATCH 0/7]: PVH dom0 Mukesh Rathor
2013-12-06  2:38 ` [V6 PATCH 1/7] pvh dom0: move some pv specific code to static functions Mukesh Rathor
2013-12-06  2:38 ` [V6 PATCH 2/7] pvh dom0: construct_dom0 changes Mukesh Rathor
2013-12-06  2:38 ` [V6 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86 Mukesh Rathor
2013-12-06  2:38 ` [V6 PATCH 4/7] pvh dom0: Introduce p2m_map_foreign Mukesh Rathor
2013-12-09 12:02   ` Tim Deegan
2013-12-06  2:38 ` [V6 PATCH 5/7] pvh: change xsm_add_to_physmap Mukesh Rathor
2013-12-06  2:38 ` [V6 PATCH 6/7] pvh dom0: Add and remove foreign pages Mukesh Rathor
2013-12-06  2:54   ` Mukesh Rathor
2013-12-06 11:46     ` Jan Beulich
2013-12-07  2:09       ` Mukesh Rathor
2013-12-07  2:34   ` [V6 PATCH 6.1/7] " Mukesh Rathor
2013-12-07 16:06     ` Julien Grall
2013-12-09  9:50     ` Jan Beulich
2013-12-10  1:30       ` Mukesh Rathor
2013-12-09 10:31     ` Ian Campbell
2013-12-09 13:46       ` Julien Grall
2013-12-09 12:11     ` Tim Deegan
2013-12-10  2:16       ` Mukesh Rathor
2013-12-09  2:45   ` [V6 PATCH 6/7] " Julien Grall
2013-12-09  2:57     ` Julien Grall
2013-12-10  2:17     ` Mukesh Rathor
2013-12-11  0:27   ` [V6 PATCH 6.2/7] " Mukesh Rathor
2013-12-11  0:44     ` Mukesh Rathor
2013-12-11  1:35       ` Julien Grall
2013-12-11  1:47         ` Mukesh Rathor
2013-12-11  9:23           ` Jan Beulich
2013-12-11 14:29           ` Tim Deegan
2013-12-12  2:46             ` Mukesh Rathor
2013-12-13  2:44               ` Mukesh Rathor
2013-12-13 11:25                 ` Tim Deegan
2013-12-13 11:39                   ` Jan Beulich
2013-12-13 19:02                     ` George Dunlap
2013-12-16  7:47                       ` Jan Beulich
2013-12-14  2:48                   ` Mukesh Rathor
2013-12-16  8:40                     ` Jan Beulich
2013-12-16 23:27                       ` Mukesh Rathor
2013-12-16 23:44                         ` Julien Grall
2013-12-17  1:51                           ` Mukesh Rathor
2013-12-17  2:33                         ` Mukesh Rathor
2013-12-17 10:10                         ` Tim Deegan
2013-12-17 23:24                           ` Mukesh Rathor
2013-12-18  2:34                           ` Mukesh Rathor
2013-12-18  9:51                             ` Jan Beulich
2013-12-18  9:53                             ` Tim Deegan
2013-12-06  2:38 ` [V6 PATCH 7/7] pvh dom0: add opt_dom0pvh to setup.c Mukesh Rathor

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.