* [V7 PATCH 0/7]: PVH dom0....
@ 2013-12-17  2:38 Mukesh Rathor
  2013-12-17  2:38 ` [V7 PATCH 1/7] pvh dom0: move some pv specific code to static functions Mukesh Rathor
                   ` (7 more replies)
  0 siblings, 8 replies; 52+ messages in thread
From: Mukesh Rathor @ 2013-12-17  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

Hi,

V7: The only change from V6 is in patch #6:
      - remove p2m_rem_foreign: this code can now simply be skipped, as the
        critical part, releasing the refcnt, has been moved into the ept code
      - fix up get_page_from_gfn so that it takes a refcnt for foreign
        pages too. Although this is redundant (a refcnt is already held),
        it simplifies the code and also helps ARM.


These patches are based on c/s: e423b5cd60ff95ba3680e2e4a8440d4d19b2b13e

thanks
Mukesh

* [V7 PATCH 1/7] pvh dom0: move some pv specific code to static functions
  2013-12-17  2:38 [V7 PATCH 0/7]: PVH dom0 Mukesh Rathor
@ 2013-12-17  2:38 ` Mukesh Rathor
  2013-12-17  2:38 ` [V7 PATCH 2/7] pvh dom0: construct_dom0 changes Mukesh Rathor
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 52+ messages in thread
From: Mukesh Rathor @ 2013-12-17  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

In this preparatory patch, some PV-specific code is
carved out into static functions. No functional change.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/domain_build.c |  347 +++++++++++++++++++++++--------------------
 1 files changed, 188 insertions(+), 159 deletions(-)

diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 84ce392..f3ae9ad 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -307,6 +307,187 @@ static void __init process_dom0_ioports_disable(void)
     }
 }
 
+static __init void mark_pv_pt_pages_rdonly(struct domain *d,
+                                           l4_pgentry_t *l4start,
+                                           unsigned long vpt_start,
+                                           unsigned long nr_pt_pages)
+{
+    unsigned long count;
+    struct page_info *page;
+    l4_pgentry_t *pl4e;
+    l3_pgentry_t *pl3e;
+    l2_pgentry_t *pl2e;
+    l1_pgentry_t *pl1e;
+
+    pl4e = l4start + l4_table_offset(vpt_start);
+    pl3e = l4e_to_l3e(*pl4e);
+    pl3e += l3_table_offset(vpt_start);
+    pl2e = l3e_to_l2e(*pl3e);
+    pl2e += l2_table_offset(vpt_start);
+    pl1e = l2e_to_l1e(*pl2e);
+    pl1e += l1_table_offset(vpt_start);
+    for ( count = 0; count < nr_pt_pages; count++ )
+    {
+        l1e_remove_flags(*pl1e, _PAGE_RW);
+        page = mfn_to_page(l1e_get_pfn(*pl1e));
+
+        /* Read-only mapping + PGC_allocated + page-table page. */
+        page->count_info         = PGC_allocated | 3;
+        page->u.inuse.type_info |= PGT_validated | 1;
+
+        /* Top-level p.t. is pinned. */
+        if ( (page->u.inuse.type_info & PGT_type_mask) ==
+             (!is_pv_32on64_domain(d) ?
+              PGT_l4_page_table : PGT_l3_page_table) )
+        {
+            page->count_info        += 1;
+            page->u.inuse.type_info += 1 | PGT_pinned;
+        }
+
+        /* Iterate. */
+        if ( !((unsigned long)++pl1e & (PAGE_SIZE - 1)) )
+        {
+            if ( !((unsigned long)++pl2e & (PAGE_SIZE - 1)) )
+            {
+                if ( !((unsigned long)++pl3e & (PAGE_SIZE - 1)) )
+                    pl3e = l4e_to_l3e(*++pl4e);
+                pl2e = l3e_to_l2e(*pl3e);
+            }
+            pl1e = l2e_to_l1e(*pl2e);
+        }
+    }
+}
+
+static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
+                                    unsigned long v_start, unsigned long v_end,
+                                    unsigned long vphysmap_start,
+                                    unsigned long vphysmap_end,
+                                    unsigned long nr_pages)
+{
+    struct page_info *page = NULL;
+    l4_pgentry_t *pl4e, *l4start = map_domain_page(pgtbl_pfn);
+    l3_pgentry_t *pl3e = NULL;
+    l2_pgentry_t *pl2e = NULL;
+    l1_pgentry_t *pl1e = NULL;
+
+    if ( v_start <= vphysmap_end && vphysmap_start <= v_end )
+        panic("DOM0 P->M table overlaps initial mapping");
+
+    while ( vphysmap_start < vphysmap_end )
+    {
+        if ( d->tot_pages + ((round_pgup(vphysmap_end) - vphysmap_start)
+                             >> PAGE_SHIFT) + 3 > nr_pages )
+            panic("Dom0 allocation too small for initial P->M table");
+
+        if ( pl1e )
+        {
+            unmap_domain_page(pl1e);
+            pl1e = NULL;
+        }
+        if ( pl2e )
+        {
+            unmap_domain_page(pl2e);
+            pl2e = NULL;
+        }
+        if ( pl3e )
+        {
+            unmap_domain_page(pl3e);
+            pl3e = NULL;
+        }
+        pl4e = l4start + l4_table_offset(vphysmap_start);
+        if ( !l4e_get_intpte(*pl4e) )
+        {
+            page = alloc_domheap_page(d, 0);
+            if ( !page )
+                break;
+
+            /* No mapping, PGC_allocated + page-table page. */
+            page->count_info = PGC_allocated | 2;
+            page->u.inuse.type_info = PGT_l3_page_table | PGT_validated | 1;
+            pl3e = __map_domain_page(page);
+            clear_page(pl3e);
+            *pl4e = l4e_from_page(page, L4_PROT);
+        } else
+            pl3e = map_domain_page(l4e_get_pfn(*pl4e));
+
+        pl3e += l3_table_offset(vphysmap_start);
+        if ( !l3e_get_intpte(*pl3e) )
+        {
+            if ( cpu_has_page1gb &&
+                 !(vphysmap_start & ((1UL << L3_PAGETABLE_SHIFT) - 1)) &&
+                 vphysmap_end >= vphysmap_start + (1UL << L3_PAGETABLE_SHIFT) &&
+                 (page = alloc_domheap_pages(d,
+                                             L3_PAGETABLE_SHIFT - PAGE_SHIFT,
+                                             0)) != NULL )
+            {
+                *pl3e = l3e_from_page(page, L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
+                vphysmap_start += 1UL << L3_PAGETABLE_SHIFT;
+                continue;
+            }
+            if ( (page = alloc_domheap_page(d, 0)) == NULL )
+                break;
+
+            /* No mapping, PGC_allocated + page-table page. */
+            page->count_info = PGC_allocated | 2;
+            page->u.inuse.type_info = PGT_l2_page_table | PGT_validated | 1;
+            pl2e = __map_domain_page(page);
+            clear_page(pl2e);
+            *pl3e = l3e_from_page(page, L3_PROT);
+        }
+        else
+           pl2e = map_domain_page(l3e_get_pfn(*pl3e));
+
+        pl2e += l2_table_offset(vphysmap_start);
+        if ( !l2e_get_intpte(*pl2e) )
+        {
+            if ( !(vphysmap_start & ((1UL << L2_PAGETABLE_SHIFT) - 1)) &&
+                 vphysmap_end >= vphysmap_start + (1UL << L2_PAGETABLE_SHIFT) &&
+                 (page = alloc_domheap_pages(d,
+                                             L2_PAGETABLE_SHIFT - PAGE_SHIFT,
+                                             0)) != NULL )
+            {
+                *pl2e = l2e_from_page(page, L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
+                if ( opt_allow_superpage )
+                    get_superpage(page_to_mfn(page), d);
+                vphysmap_start += 1UL << L2_PAGETABLE_SHIFT;
+                continue;
+            }
+            if ( (page = alloc_domheap_page(d, 0)) == NULL )
+                break;
+
+            /* No mapping, PGC_allocated + page-table page. */
+            page->count_info = PGC_allocated | 2;
+            page->u.inuse.type_info = PGT_l1_page_table | PGT_validated | 1;
+            pl1e = __map_domain_page(page);
+            clear_page(pl1e);
+            *pl2e = l2e_from_page(page, L2_PROT);
+        }
+        else
+            pl1e = map_domain_page(l2e_get_pfn(*pl2e));
+
+        pl1e += l1_table_offset(vphysmap_start);
+        BUG_ON(l1e_get_intpte(*pl1e));
+        page = alloc_domheap_page(d, 0);
+        if ( !page )
+            break;
+
+        *pl1e = l1e_from_page(page, L1_PROT|_PAGE_DIRTY);
+        vphysmap_start += PAGE_SIZE;
+        vphysmap_start &= PAGE_MASK;
+    }
+    if ( !page )
+        panic("Not enough RAM for DOM0 P->M table");
+
+    if ( pl1e )
+        unmap_domain_page(pl1e);
+    if ( pl2e )
+        unmap_domain_page(pl2e);
+    if ( pl3e )
+        unmap_domain_page(pl3e);
+
+    unmap_domain_page(l4start);
+}
+
 int __init construct_dom0(
     struct domain *d,
     const module_t *image, unsigned long image_headroom,
@@ -706,43 +887,8 @@ int __init construct_dom0(
     }
 
     /* Pages that are part of page tables must be read only. */
-    l4tab = l4start + l4_table_offset(vpt_start);
-    l3start = l3tab = l4e_to_l3e(*l4tab);
-    l3tab += l3_table_offset(vpt_start);
-    l2start = l2tab = l3e_to_l2e(*l3tab);
-    l2tab += l2_table_offset(vpt_start);
-    l1start = l1tab = l2e_to_l1e(*l2tab);
-    l1tab += l1_table_offset(vpt_start);
-    for ( count = 0; count < nr_pt_pages; count++ ) 
-    {
-        l1e_remove_flags(*l1tab, _PAGE_RW);
-        page = mfn_to_page(l1e_get_pfn(*l1tab));
-
-        /* Read-only mapping + PGC_allocated + page-table page. */
-        page->count_info         = PGC_allocated | 3;
-        page->u.inuse.type_info |= PGT_validated | 1;
-
-        /* Top-level p.t. is pinned. */
-        if ( (page->u.inuse.type_info & PGT_type_mask) ==
-             (!is_pv_32on64_domain(d) ?
-              PGT_l4_page_table : PGT_l3_page_table) )
-        {
-            page->count_info        += 1;
-            page->u.inuse.type_info += 1 | PGT_pinned;
-        }
-
-        /* Iterate. */
-        if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
-        {
-            if ( !((unsigned long)++l2tab & (PAGE_SIZE - 1)) )
-            {
-                if ( !((unsigned long)++l3tab & (PAGE_SIZE - 1)) )
-                    l3start = l3tab = l4e_to_l3e(*++l4tab);
-                l2start = l2tab = l3e_to_l2e(*l3tab);
-            }
-            l1start = l1tab = l2e_to_l1e(*l2tab);
-        }
-    }
+    if ( is_pv_domain(d) )
+        mark_pv_pt_pages_rdonly(d, l4start, vpt_start, nr_pt_pages);
 
     /* Mask all upcalls... */
     for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
@@ -814,132 +960,15 @@ int __init construct_dom0(
              elf_64bit(&elf) ? 64 : 32, parms.pae ? "p" : "");
 
     count = d->tot_pages;
-    l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
-    l3tab = NULL;
-    l2tab = NULL;
-    l1tab = NULL;
+
     /* Set up the phys->machine table if not part of the initial mapping. */
-    if ( parms.p2m_base != UNSET_ADDR )
+    if ( is_pv_domain(d) && parms.p2m_base != UNSET_ADDR )
     {
-        unsigned long va = vphysmap_start;
-
-        if ( v_start <= vphysmap_end && vphysmap_start <= v_end )
-            panic("DOM0 P->M table overlaps initial mapping");
-
-        while ( va < vphysmap_end )
-        {
-            if ( d->tot_pages + ((round_pgup(vphysmap_end) - va)
-                                 >> PAGE_SHIFT) + 3 > nr_pages )
-                panic("Dom0 allocation too small for initial P->M table");
-
-            if ( l1tab )
-            {
-                unmap_domain_page(l1tab);
-                l1tab = NULL;
-            }
-            if ( l2tab )
-            {
-                unmap_domain_page(l2tab);
-                l2tab = NULL;
-            }
-            if ( l3tab )
-            {
-                unmap_domain_page(l3tab);
-                l3tab = NULL;
-            }
-            l4tab = l4start + l4_table_offset(va);
-            if ( !l4e_get_intpte(*l4tab) )
-            {
-                page = alloc_domheap_page(d, 0);
-                if ( !page )
-                    break;
-                /* No mapping, PGC_allocated + page-table page. */
-                page->count_info = PGC_allocated | 2;
-                page->u.inuse.type_info =
-                    PGT_l3_page_table | PGT_validated | 1;
-                l3tab = __map_domain_page(page);
-                clear_page(l3tab);
-                *l4tab = l4e_from_page(page, L4_PROT);
-            } else
-                l3tab = map_domain_page(l4e_get_pfn(*l4tab));
-            l3tab += l3_table_offset(va);
-            if ( !l3e_get_intpte(*l3tab) )
-            {
-                if ( cpu_has_page1gb &&
-                     !(va & ((1UL << L3_PAGETABLE_SHIFT) - 1)) &&
-                     vphysmap_end >= va + (1UL << L3_PAGETABLE_SHIFT) &&
-                     (page = alloc_domheap_pages(d,
-                                                 L3_PAGETABLE_SHIFT -
-                                                     PAGE_SHIFT,
-                                                 0)) != NULL )
-                {
-                    *l3tab = l3e_from_page(page,
-                                           L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
-                    va += 1UL << L3_PAGETABLE_SHIFT;
-                    continue;
-                }
-                if ( (page = alloc_domheap_page(d, 0)) == NULL )
-                    break;
-                /* No mapping, PGC_allocated + page-table page. */
-                page->count_info = PGC_allocated | 2;
-                page->u.inuse.type_info =
-                    PGT_l2_page_table | PGT_validated | 1;
-                l2tab = __map_domain_page(page);
-                clear_page(l2tab);
-                *l3tab = l3e_from_page(page, L3_PROT);
-            }
-            else
-               l2tab = map_domain_page(l3e_get_pfn(*l3tab));
-            l2tab += l2_table_offset(va);
-            if ( !l2e_get_intpte(*l2tab) )
-            {
-                if ( !(va & ((1UL << L2_PAGETABLE_SHIFT) - 1)) &&
-                     vphysmap_end >= va + (1UL << L2_PAGETABLE_SHIFT) &&
-                     (page = alloc_domheap_pages(d,
-                                                 L2_PAGETABLE_SHIFT -
-                                                     PAGE_SHIFT,
-                                                 0)) != NULL )
-                {
-                    *l2tab = l2e_from_page(page,
-                                           L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
-                    if ( opt_allow_superpage )
-                        get_superpage(page_to_mfn(page), d);
-                    va += 1UL << L2_PAGETABLE_SHIFT;
-                    continue;
-                }
-                if ( (page = alloc_domheap_page(d, 0)) == NULL )
-                    break;
-                /* No mapping, PGC_allocated + page-table page. */
-                page->count_info = PGC_allocated | 2;
-                page->u.inuse.type_info =
-                    PGT_l1_page_table | PGT_validated | 1;
-                l1tab = __map_domain_page(page);
-                clear_page(l1tab);
-                *l2tab = l2e_from_page(page, L2_PROT);
-            }
-            else
-                l1tab = map_domain_page(l2e_get_pfn(*l2tab));
-            l1tab += l1_table_offset(va);
-            BUG_ON(l1e_get_intpte(*l1tab));
-            page = alloc_domheap_page(d, 0);
-            if ( !page )
-                break;
-            *l1tab = l1e_from_page(page, L1_PROT|_PAGE_DIRTY);
-            va += PAGE_SIZE;
-            va &= PAGE_MASK;
-        }
-        if ( !page )
-            panic("Not enough RAM for DOM0 P->M table");
+        pfn = pagetable_get_pfn(v->arch.guest_table);
+        setup_pv_physmap(d, pfn, v_start, v_end, vphysmap_start, vphysmap_end,
+                         nr_pages);
     }
 
-    if ( l1tab )
-        unmap_domain_page(l1tab);
-    if ( l2tab )
-        unmap_domain_page(l2tab);
-    if ( l3tab )
-        unmap_domain_page(l3tab);
-    unmap_domain_page(l4start);
-
     /* Write the phys->machine and machine->phys table entries. */
     for ( pfn = 0; pfn < count; pfn++ )
     {
-- 
1.7.2.3
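
A note on the iteration idiom in mark_pv_pt_pages_rdonly() above: the
walk advances by simply incrementing the L1 entry pointer. Each page of
entries holds PAGE_SIZE / sizeof(l1_pgentry_t) = 4096 / 8 = 512 entries,
so !((unsigned long)++pl1e & (PAGE_SIZE - 1)) becomes true exactly when
the pointer wraps onto the next page of L1 entries; at that point the
next-level pointer is advanced the same way (recursively, up to the L4
table) and the lower-level pointers are re-derived from it.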

* [V7 PATCH 2/7] pvh dom0: construct_dom0 changes
  2013-12-17  2:38 [V7 PATCH 0/7]: PVH dom0 Mukesh Rathor
  2013-12-17  2:38 ` [V7 PATCH 1/7] pvh dom0: move some pv specific code to static functions Mukesh Rathor
@ 2013-12-17  2:38 ` Mukesh Rathor
  2013-12-17  2:38 ` [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86 Mukesh Rathor
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 52+ messages in thread
From: Mukesh Rathor @ 2013-12-17  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

This patch changes construct_dom0 to boot dom0 in PVH mode. Changes
needed to support it are also included here.
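
In outline, the PVH path added here does roughly the following (a
simplified sketch of the flow in this patch, not the exact code):

    /* Simplified flow of the PVH additions to construct_dom0() */
    save_pvh_pg_mode = d->arch.paging.mode;
    d->arch.paging.mode = 0;        /* build dom0 page tables PV-style, with mfns */
    ...                             /* map kernel/initrd, write page tables */
    hap_set_pvh_alloc_for_dom0(d, nr_pages);   /* size the HAP/EPT pool */
    d->arch.paging.mode = save_pvh_pg_mode;    /* paging back on for p2m updates */
    ...                             /* populate the p2m via dom0_update_physmap() */
    pvh_fixup_page_tables_for_hap(v, v_start, v_end);  /* rewrite mfns -> pfns */
    pvh_map_all_iomem(d);           /* 1:1 map all non-RAM ranges in the EPT */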

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/domain_build.c |  235 +++++++++++++++++++++++++++++++++++++++---
 xen/arch/x86/mm/hap/hap.c   |   15 +++
 xen/include/asm-x86/hap.h   |    1 +
 3 files changed, 234 insertions(+), 17 deletions(-)

diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index f3ae9ad..a563c61 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -35,6 +35,7 @@
 #include <asm/setup.h>
 #include <asm/bzimage.h> /* for bzimage_parse */
 #include <asm/io_apic.h>
+#include <asm/hap.h>
 
 #include <public/version.h>
 
@@ -307,6 +308,151 @@ static void __init process_dom0_ioports_disable(void)
     }
 }
 
+static __init void pvh_add_mem_mapping(struct domain *d, unsigned long gfn,
+                                       unsigned long mfn, unsigned long nr_mfns)
+{
+    unsigned long i;
+    for ( i = 0; i < nr_mfns; i++ )
+        if ( !set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i)) )
+            panic("Failed setting p2m. gfn:%lx mfn:%lx i:%ld\n", gfn, mfn, i);
+}
+
+/*
+ * Set the 1:1 map for all non-RAM regions for dom 0. Thus, dom0 will have
+ * the entire io region mapped in the EPT/NPT.
+ *
+ * pvh fixme: The following doesn't map MMIO ranges when they sit above the
+ *            highest E820 covered address.
+ */
+static __init void pvh_map_all_iomem(struct domain *d)
+{
+    unsigned long start_pfn, end_pfn, end = 0, start = 0;
+    const struct e820entry *entry;
+    unsigned int i, nump;
+
+    for ( i = 0, entry = e820.map; i < e820.nr_map; i++, entry++ )
+    {
+        end = entry->addr + entry->size;
+
+        if ( entry->type == E820_RAM || entry->type == E820_UNUSABLE ||
+             i == e820.nr_map - 1 )
+        {
+            start_pfn = PFN_DOWN(start);
+
+            /* Unused RAM areas are marked UNUSABLE, so skip them too */
+            if ( entry->type == E820_RAM || entry->type == E820_UNUSABLE )
+                end_pfn = PFN_UP(entry->addr);
+            else
+                end_pfn = PFN_UP(end);
+
+            if ( start_pfn < end_pfn )
+            {
+                nump = end_pfn - start_pfn;
+                /* Add pages to the mapping */
+                pvh_add_mem_mapping(d, start_pfn, start_pfn, nump);
+            }
+            start = end;
+        }
+    }
+
+    /* If the e820 ended under 4GB, we must map the remaining space up to 4GB */
+    if ( end < GB(4) )
+    {
+        start_pfn = PFN_UP(end);
+        end_pfn = (GB(4)) >> PAGE_SHIFT;
+        nump = end_pfn - start_pfn;
+        pvh_add_mem_mapping(d, start_pfn, start_pfn, nump);
+    }
+}
+
+static __init void dom0_update_physmap(struct domain *d, unsigned long pfn,
+                                   unsigned long mfn, unsigned long vphysmap_s)
+{
+    if ( is_pvh_domain(d) )
+    {
+        int rc = guest_physmap_add_page(d, pfn, mfn, 0);
+        BUG_ON(rc);
+        return;
+    }
+    if ( !is_pv_32on64_domain(d) )
+        ((unsigned long *)vphysmap_s)[pfn] = mfn;
+    else
+        ((unsigned int *)vphysmap_s)[pfn] = mfn;
+
+    set_gpfn_from_mfn(mfn, pfn);
+}
+
+static __init void pvh_fixup_page_tables_for_hap(struct vcpu *v,
+                                                 unsigned long v_start,
+                                                 unsigned long v_end)
+{
+    int i, j, k;
+    l4_pgentry_t *pl4e, *l4start;
+    l3_pgentry_t *pl3e;
+    l2_pgentry_t *pl2e;
+    l1_pgentry_t *pl1e;
+    unsigned long cr3_pfn;
+
+    ASSERT(paging_mode_enabled(v->domain));
+
+    l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+
+    /* Clear entries prior to guest L4 start */
+    pl4e = l4start + l4_table_offset(v_start);
+    memset(l4start, 0, (unsigned long)pl4e - (unsigned long)l4start);
+
+    for ( ; pl4e <= l4start + l4_table_offset(v_end - 1); pl4e++ )
+    {
+        pl3e = map_l3t_from_l4e(*pl4e);
+        for ( i = 0; i < PAGE_SIZE / sizeof(*pl3e); i++, pl3e++ )
+        {
+            if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
+                continue;
+
+            pl2e = map_l2t_from_l3e(*pl3e);
+            for ( j = 0; j < PAGE_SIZE / sizeof(*pl2e); j++, pl2e++ )
+            {
+                if ( !(l2e_get_flags(*pl2e)  & _PAGE_PRESENT) )
+                    continue;
+
+                pl1e = map_l1t_from_l2e(*pl2e);
+                for ( k = 0; k < PAGE_SIZE / sizeof(*pl1e); k++, pl1e++ )
+                {
+                    if ( !(l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
+                        continue;
+
+                    *pl1e = l1e_from_pfn(get_gpfn_from_mfn(l1e_get_pfn(*pl1e)),
+                                         l1e_get_flags(*pl1e));
+                }
+                unmap_domain_page(pl1e);
+                *pl2e = l2e_from_pfn(get_gpfn_from_mfn(l2e_get_pfn(*pl2e)),
+                                     l2e_get_flags(*pl2e));
+            }
+            unmap_domain_page(pl2e);
+            *pl3e = l3e_from_pfn(get_gpfn_from_mfn(l3e_get_pfn(*pl3e)),
+                                 l3e_get_flags(*pl3e));
+        }
+        unmap_domain_page(pl3e);
+        *pl4e = l4e_from_pfn(get_gpfn_from_mfn(l4e_get_pfn(*pl4e)),
+                             l4e_get_flags(*pl4e));
+    }
+
+    /* Clear entries post guest L4. */
+    if ( (unsigned long)pl4e & (PAGE_SIZE - 1) )
+        memset(pl4e, 0, PAGE_SIZE - ((unsigned long)pl4e & (PAGE_SIZE - 1)));
+
+    unmap_domain_page(l4start);
+
+    cr3_pfn = get_gpfn_from_mfn(paddr_to_pfn(v->arch.cr3));
+    v->arch.hvm_vcpu.guest_cr[3] = pfn_to_paddr(cr3_pfn);
+
+    /*
+     * Finally, we update the paging modes (hap_update_paging_modes). This will
+     * create monitor_table for us, update v->arch.cr3, and update vmcs.cr3.
+     */
+    paging_update_paging_modes(v);
+}
+
 static __init void mark_pv_pt_pages_rdonly(struct domain *d,
                                            l4_pgentry_t *l4start,
                                            unsigned long vpt_start,
@@ -516,6 +662,8 @@ int __init construct_dom0(
     l3_pgentry_t *l3tab = NULL, *l3start = NULL;
     l2_pgentry_t *l2tab = NULL, *l2start = NULL;
     l1_pgentry_t *l1tab = NULL, *l1start = NULL;
+    paddr_t shared_info_paddr = 0;
+    u32 save_pvh_pg_mode = 0;
 
     /*
      * This fully describes the memory layout of the initial domain. All 
@@ -593,12 +741,21 @@ int __init construct_dom0(
         goto out;
     }
 
-    if ( parms.elf_notes[XEN_ELFNOTE_SUPPORTED_FEATURES].type != XEN_ENT_NONE &&
-         !test_bit(XENFEAT_dom0, parms.f_supported) )
+    if ( parms.elf_notes[XEN_ELFNOTE_SUPPORTED_FEATURES].type != XEN_ENT_NONE )
     {
-        printk("Kernel does not support Dom0 operation\n");
-        rc = -EINVAL;
-        goto out;
+        if ( !test_bit(XENFEAT_dom0, parms.f_supported) )
+        {
+            printk("Kernel does not support Dom0 operation\n");
+            rc = -EINVAL;
+            goto out;
+        }
+        if ( is_pvh_domain(d) &&
+             !test_bit(XENFEAT_hvm_callback_vector, parms.f_supported) )
+        {
+            printk("Kernel does not support PVH mode\n");
+            rc = -EINVAL;
+            goto out;
+        }
     }
 
     if ( compat32 )
@@ -663,6 +820,13 @@ int __init construct_dom0(
     vstartinfo_end   = (vstartinfo_start +
                         sizeof(struct start_info) +
                         sizeof(struct dom0_vga_console_info));
+
+    if ( is_pvh_domain(d) )
+    {
+        shared_info_paddr = round_pgup(vstartinfo_end) - v_start;
+        vstartinfo_end   += PAGE_SIZE;
+    }
+
     vpt_start        = round_pgup(vstartinfo_end);
     for ( nr_pt_pages = 2; ; nr_pt_pages++ )
     {
@@ -903,6 +1067,13 @@ int __init construct_dom0(
         (void)alloc_vcpu(d, i, cpu);
     }
 
+    /*
+     * pvh: we temporarily disable paging mode so that we can build cr3 needed
+     * to run on dom0's page tables.
+     */
+    save_pvh_pg_mode = d->arch.paging.mode;
+    d->arch.paging.mode = 0;
+
     /* Set up CR3 value for write_ptbase */
     if ( paging_mode_enabled(d) )
         paging_update_paging_modes(v);
@@ -969,6 +1140,15 @@ int __init construct_dom0(
                          nr_pages);
     }
 
+    if ( is_pvh_domain(d) )
+        hap_set_pvh_alloc_for_dom0(d, nr_pages);
+
+    /*
+     * We enable paging mode again so guest_physmap_add_page will do the
+     * right thing for us.
+     */
+    d->arch.paging.mode = save_pvh_pg_mode;
+
     /* Write the phys->machine and machine->phys table entries. */
     for ( pfn = 0; pfn < count; pfn++ )
     {
@@ -985,11 +1165,7 @@ int __init construct_dom0(
         if ( pfn > REVERSE_START && (vinitrd_start || pfn < initrd_pfn) )
             mfn = alloc_epfn - (pfn - REVERSE_START);
 #endif
-        if ( !is_pv_32on64_domain(d) )
-            ((unsigned long *)vphysmap_start)[pfn] = mfn;
-        else
-            ((unsigned int *)vphysmap_start)[pfn] = mfn;
-        set_gpfn_from_mfn(mfn, pfn);
+        dom0_update_physmap(d, pfn, mfn, vphysmap_start);
         if (!(pfn & 0xfffff))
             process_pending_softirqs();
     }
@@ -1005,8 +1181,8 @@ int __init construct_dom0(
             if ( !page->u.inuse.type_info &&
                  !get_page_and_type(page, d, PGT_writable_page) )
                 BUG();
-            ((unsigned long *)vphysmap_start)[pfn] = mfn;
-            set_gpfn_from_mfn(mfn, pfn);
+
+            dom0_update_physmap(d, pfn, mfn, vphysmap_start);
             ++pfn;
             if (!(pfn & 0xfffff))
                 process_pending_softirqs();
@@ -1026,11 +1202,7 @@ int __init construct_dom0(
 #ifndef NDEBUG
 #define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
 #endif
-            if ( !is_pv_32on64_domain(d) )
-                ((unsigned long *)vphysmap_start)[pfn] = mfn;
-            else
-                ((unsigned int *)vphysmap_start)[pfn] = mfn;
-            set_gpfn_from_mfn(mfn, pfn);
+            dom0_update_physmap(d, pfn, mfn, vphysmap_start);
 #undef pfn
             page++; pfn++;
             if (!(pfn & 0xfffff))
@@ -1054,6 +1226,15 @@ int __init construct_dom0(
         si->console.dom0.info_size = sizeof(struct dom0_vga_console_info);
     }
 
+    /*
+     * PVH: We need to update si->shared_info while we are on dom0 page tables,
+     * but need to defer the p2m update until after we have fixed up the
+     * page tables for PVH so that the m2p for the si pte entry returns
+     * correct pfn.
+     */
+    if ( is_pvh_domain(d) )
+        si->shared_info = shared_info_paddr;
+
     if ( is_pv_32on64_domain(d) )
         xlat_start_info(si, XLAT_start_info_console_dom0);
 
@@ -1087,8 +1268,15 @@ int __init construct_dom0(
     regs->eflags = X86_EFLAGS_IF;
 
     if ( opt_dom0_shadow )
+    {
+        if ( is_pvh_domain(d) )
+        {
+            printk("Unsupported option dom0_shadow for PVH\n");
+            return -EINVAL;
+        }
         if ( paging_enable(d, PG_SH_enable) == 0 ) 
             paging_update_paging_modes(v);
+    }
 
     if ( supervisor_mode_kernel )
     {
@@ -1178,6 +1366,19 @@ int __init construct_dom0(
         printk(" Xen warning: dom0 kernel broken ELF: %s\n",
                elf_check_broken(&elf));
 
+    if ( is_pvh_domain(d) )
+    {
+        /* finally, fixup the page table, replacing mfns with pfns */
+        pvh_fixup_page_tables_for_hap(v, v_start, v_end);
+
+        /* the pt has correct pfn for si, now update the mfn in the p2m */
+        mfn = virt_to_mfn(d->shared_info);
+        pfn = shared_info_paddr >> PAGE_SHIFT;
+        dom0_update_physmap(d, pfn, mfn, 0);
+
+        pvh_map_all_iomem(d);
+    }
+
     iommu_dom0_init(dom0);
     return 0;
 
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index d3f64bd..cc3ba66 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -579,6 +579,21 @@ int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
     }
 }
 
+void __init hap_set_pvh_alloc_for_dom0(struct domain *d,
+                                       unsigned long num_pages)
+{
+    int rc;
+    unsigned long memkb = num_pages * (PAGE_SIZE / 1024);
+
+    /* Copied from: libxl_get_required_shadow_memory() */
+    memkb = 4 * (256 * d->max_vcpus + 2 * (memkb / 1024));
+    num_pages = ( (memkb + 1023) / 1024) << (20 - PAGE_SHIFT);
+    paging_lock(d);
+    rc = hap_set_allocation(d, num_pages, NULL);
+    paging_unlock(d);
+    BUG_ON(rc);
+}
+
 static const struct paging_mode hap_paging_real_mode;
 static const struct paging_mode hap_paging_protected_mode;
 static const struct paging_mode hap_paging_pae_mode;
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index e03f983..aab8558 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -63,6 +63,7 @@ int   hap_track_dirty_vram(struct domain *d,
                            XEN_GUEST_HANDLE_64(uint8) dirty_bitmap);
 
 extern const struct paging_mode *hap_paging_get_mode(struct vcpu *);
+void hap_set_pvh_alloc_for_dom0(struct domain *d, unsigned long num_pages);
 
 #endif /* XEN_HAP_H */
 
-- 
1.7.2.3
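
As a rough worked example of the sizing heuristic in
hap_set_pvh_alloc_for_dom0() above (illustrative numbers): for max_vcpus
= 4 and num_pages = 262144 (1GiB of dom0 memory in 4KiB pages), memkb
starts at 262144 * (4096/1024) = 1048576 KiB; the formula then yields
memkb = 4 * (256*4 + 2*(1048576/1024)) = 4 * (1024 + 2048) = 12288 KiB,
so num_pages = ((12288+1023)/1024) << (20-12) = 12 << 8 = 3072 pages,
i.e. a 12MiB HAP pool.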

* [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-17  2:38 [V7 PATCH 0/7]: PVH dom0 Mukesh Rathor
  2013-12-17  2:38 ` [V7 PATCH 1/7] pvh dom0: move some pv specific code to static functions Mukesh Rathor
  2013-12-17  2:38 ` [V7 PATCH 2/7] pvh dom0: construct_dom0 changes Mukesh Rathor
@ 2013-12-17  2:38 ` Mukesh Rathor
  2013-12-17 13:07   ` Jan Beulich
  2013-12-17 16:56   ` Jan Beulich
  2013-12-17  2:38 ` [V7 PATCH 4/7] pvh dom0: Introduce p2m_map_foreign Mukesh Rathor
                   ` (4 subsequent siblings)
  7 siblings, 2 replies; 52+ messages in thread
From: Mukesh Rathor @ 2013-12-17  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

This preparatory patch adds support for XENMEM_add_to_physmap_range
on x86 so that it can be used when creating a guest from PVH dom0. To
this end, we add a new function, xenmem_add_to_physmap_range(), and
change the parameters of xenmem_add_to_physmap_once so that it can be
called from xenmem_add_to_physmap_range.

Please note, compat will continue to return -ENOSYS.
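
For context, a hypothetical caller (e.g. a privcmd mmap path in the dom0
kernel) might invoke the new subop roughly as below, once a later patch
in this series wires up XENMAPSPACE_gmfn_foreign. The field names follow
struct xen_add_to_physmap_range from the public headers, but the buffer
setup and error handling are elided:

    struct xen_add_to_physmap_range xatpr = {
        .domid         = DOMID_SELF,               /* map into the caller */
        .space         = XENMAPSPACE_gmfn_foreign,
        .size          = nr,                       /* entries in idxs/gpfns/errs */
        .foreign_domid = fdomid,                   /* guest whose gfns we map */
    };

    set_xen_guest_handle(xatpr.idxs,  idxs);   /* IN: foreign gfns */
    set_xen_guest_handle(xatpr.gpfns, gpfns);  /* IN: local gpfns to map them at */
    set_xen_guest_handle(xatpr.errs,  errs);   /* OUT: per-entry error codes */
    rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatpr);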

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/mm.c |   65 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 65 insertions(+), 0 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index dd42bde..ae332f4 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4675,6 +4675,39 @@ static int xenmem_add_to_physmap(struct domain *d,
     return xenmem_add_to_physmap_once(d, xatp);
 }
 
+static int xenmem_add_to_physmap_range(struct domain *d,
+                                       struct xen_add_to_physmap_range *xatpr)
+{
+    /* Process entries in reverse order to allow continuations */
+    while ( xatpr->size > 0 )
+    {
+        int rc;
+        xen_ulong_t idx;
+        xen_pfn_t gpfn;
+        struct xen_add_to_physmap xatp;
+
+        if ( copy_from_guest_offset(&idx, xatpr->idxs, xatpr->size-1, 1)  ||
+             copy_from_guest_offset(&gpfn, xatpr->gpfns, xatpr->size-1, 1) )
+            return -EFAULT;
+
+        xatp.space = xatpr->space;
+        xatp.idx = idx;
+        xatp.gpfn = gpfn;
+        rc = xenmem_add_to_physmap_once(d, &xatp);
+
+        if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
+            return -EFAULT;
+
+        xatpr->size--;
+
+        /* Check for continuation if it's not the last iteration */
+        if ( xatpr->size > 0 && hypercall_preempt_check() )
+            return -EAGAIN;
+    }
+
+    return 0;
+}
+
 long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     int rc;
@@ -4689,6 +4722,10 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( copy_from_guest(&xatp, arg, 1) )
             return -EFAULT;
 
+        /* This one is only supported for add_to_physmap_range for now */
+        if ( xatp.space == XENMAPSPACE_gmfn_foreign )
+            return -EINVAL;
+
         d = rcu_lock_domain_by_any_id(xatp.domid);
         if ( d == NULL )
             return -ESRCH;
@@ -4716,6 +4753,34 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         return rc;
     }
 
+    case XENMEM_add_to_physmap_range:
+    {
+        struct xen_add_to_physmap_range xatpr;
+        struct domain *d;
+
+        if ( copy_from_guest(&xatpr, arg, 1) )
+            return -EFAULT;
+
+        /* This mapspace is redundant for this hypercall */
+        if ( xatpr.space == XENMAPSPACE_gmfn_range )
+            return -EINVAL;
+
+        d = rcu_lock_domain_by_any_id(xatpr.domid);
+        if ( d == NULL )
+            return -ESRCH;
+
+        if ( (rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d)) == 0 )
+            rc = xenmem_add_to_physmap_range(d, &xatpr);
+
+        rcu_unlock_domain(d);
+
+        if ( rc == -EAGAIN )
+            rc = hypercall_create_continuation(
+                __HYPERVISOR_memory_op, "ih", op, arg);
+
+        return rc;
+    }
+
     case XENMEM_set_memory_map:
     {
         struct xen_foreign_memory_map fmap;
-- 
1.7.2.3

* [V7 PATCH 4/7] pvh dom0: Introduce p2m_map_foreign
  2013-12-17  2:38 [V7 PATCH 0/7]: PVH dom0 Mukesh Rathor
                   ` (2 preceding siblings ...)
  2013-12-17  2:38 ` [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86 Mukesh Rathor
@ 2013-12-17  2:38 ` Mukesh Rathor
  2013-12-17  2:38 ` [V7 PATCH 5/7] pvh: change xsm_add_to_physmap Mukesh Rathor
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 52+ messages in thread
From: Mukesh Rathor @ 2013-12-17  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

This patch introduces a new type, p2m_map_foreign, for pages that the
toolstack on PVH dom0 maps from foreign domains that it is creating
or supporting during its runtime.
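
Conceptually, the guard extended in the p2m_remove_page hunk below says
that only ordinary RAM owns its m2p slot; grant, shared and now foreign
mappings must not clobber the owning domain's mfn -> gfn entry:

    if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
        set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);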

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/x86/mm/p2m-ept.c |    1 +
 xen/arch/x86/mm/p2m-pt.c  |    1 +
 xen/arch/x86/mm/p2m.c     |   28 ++++++++++++++++++++--------
 xen/include/asm-x86/p2m.h |    4 ++++
 4 files changed, 26 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 92d9e2d..08d1d72 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -75,6 +75,7 @@ static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type, p2m_acces
             entry->w = 0;
             break;
         case p2m_grant_map_rw:
+        case p2m_map_foreign:
             entry->r = entry->w = 1;
             entry->x = 0;
             break;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index a1d5650..09b60ce 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -89,6 +89,7 @@ static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn)
     case p2m_ram_rw:
         return flags | P2M_BASE_FLAGS | _PAGE_RW;
     case p2m_grant_map_rw:
+    case p2m_map_foreign:
         return flags | P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
     case p2m_mmio_direct:
         if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 8f380ed..0659ef1 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -525,7 +525,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
         for ( i = 0; i < (1UL << page_order); i++ )
         {
             mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL);
-            if ( !p2m_is_grant(t) && !p2m_is_shared(t) )
+            if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
                 set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
             ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
         }
@@ -756,10 +756,9 @@ void p2m_change_type_range(struct domain *d,
     p2m_unlock(p2m);
 }
 
-
-
-int
-set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
+static int
+set_typed_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
+                    p2m_type_t gfn_p2mt)
 {
     int rc = 0;
     p2m_access_t a;
@@ -784,16 +783,29 @@ set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
         set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
     }
 
-    P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
-    rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_mmio_direct, p2m->default_access);
+    P2M_DEBUG("set %d %lx %lx\n", gfn_p2mt, gfn, mfn_x(mfn));
+    rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, gfn_p2mt,
+                       p2m->default_access);
     gfn_unlock(p2m, gfn, 0);
     if ( 0 == rc )
         gdprintk(XENLOG_ERR,
-            "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
+            "%s: set_p2m_entry failed! mfn=%08lx\n", __func__,
             mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)));
     return rc;
 }
 
+/* Returns: True for success. */
+int set_foreign_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
+{
+    return set_typed_p2m_entry(d, gfn, mfn, p2m_map_foreign);
+}
+
+int
+set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
+{
+    return set_typed_p2m_entry(d, gfn, mfn, p2m_mmio_direct);
+}
+
 int
 clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
 {
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index f4e7253..d5d6391 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -70,6 +70,7 @@ typedef enum {
     p2m_ram_paging_in = 11,       /* Memory that is being paged in */
     p2m_ram_shared = 12,          /* Shared or sharable memory */
     p2m_ram_broken = 13,          /* Broken page, access cause domain crash */
+    p2m_map_foreign  = 14,        /* ram pages from foreign domain */
 } p2m_type_t;
 
 /*
@@ -180,6 +181,7 @@ typedef unsigned int p2m_query_t;
 #define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES)
 #define p2m_is_shared(_t)   (p2m_to_mask(_t) & P2M_SHARED_TYPES)
 #define p2m_is_broken(_t)   (p2m_to_mask(_t) & P2M_BROKEN_TYPES)
+#define p2m_is_foreign(_t)  (p2m_to_mask(_t) & p2m_to_mask(p2m_map_foreign))
 
 /* Per-p2m-table state */
 struct p2m_domain {
@@ -510,6 +512,8 @@ p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
 int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
 int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
 
+/* Set foreign mfn in the current guest's p2m table. */
+int set_foreign_p2m_entry(struct domain *domp, unsigned long gfn, mfn_t mfn);
 
 /* 
  * Populate-on-demand
-- 
1.7.2.3

* [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2013-12-17  2:38 [V7 PATCH 0/7]: PVH dom0 Mukesh Rathor
                   ` (3 preceding siblings ...)
  2013-12-17  2:38 ` [V7 PATCH 4/7] pvh dom0: Introduce p2m_map_foreign Mukesh Rathor
@ 2013-12-17  2:38 ` Mukesh Rathor
  2013-12-17  8:32   ` Jan Beulich
                     ` (3 more replies)
  2013-12-17  2:38 ` [V7 PATCH 6/7] pvh dom0: Add and remove foreign pages Mukesh Rathor
                   ` (2 subsequent siblings)
  7 siblings, 4 replies; 52+ messages in thread
From: Mukesh Rathor @ 2013-12-17  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

In preparation for the next patch, we update xsm_add_to_physmap to
allow checking of a foreign domain: the current domain must have the
right to update the mappings of the target domain with pages from the
foreign domain.
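
Under FLASK this means the mapping domain now also needs map permissions
on the foreign domain. A hypothetical policy fragment granting them
(the type names are illustrative, not part of this series) could read:

    # let the dom0/toolstack type map pages of the guests it manages
    allow dom0_t domU_t:mmu { physmap map_read map_write };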

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/arm/mm.c       |    4 ++--
 xen/arch/x86/mm.c       |   18 +++++++++++++++---
 xen/include/xsm/dummy.h |   10 ++++++++--
 xen/include/xsm/xsm.h   |    6 +++---
 xen/xsm/flask/hooks.c   |    9 +++++++--
 5 files changed, 35 insertions(+), 12 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 654281a..3515526 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1134,7 +1134,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
+        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL);
         if ( rc )
         {
             rcu_unlock_domain(d);
@@ -1165,7 +1165,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
+        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL);
         if ( rc )
         {
             rcu_unlock_domain(d);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index ae332f4..0cae437 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4730,7 +4730,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        if ( xsm_add_to_physmap(XSM_TARGET, current->domain, d) )
+        if ( xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL) )
         {
             rcu_unlock_domain(d);
             return -EPERM;
@@ -4756,7 +4756,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
     case XENMEM_add_to_physmap_range:
     {
         struct xen_add_to_physmap_range xatpr;
-        struct domain *d;
+        struct domain *d, *fd = NULL;
 
         if ( copy_from_guest(&xatpr, arg, 1) )
             return -EFAULT;
@@ -4769,10 +4769,22 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        if ( (rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d)) == 0 )
+        if ( xatpr.space == XENMAPSPACE_gmfn_foreign )
+        {
+            fd = get_pg_owner(xatpr.foreign_domid);
+            if ( fd == NULL )
+            {
+                rcu_unlock_domain(d);
+                return -ESRCH;
+            }
+        }
+        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
+        if ( rc == 0 )
             rc = xenmem_add_to_physmap_range(d, &xatpr);
 
         rcu_unlock_domain(d);
+        if ( fd )
+            put_pg_owner(fd);
 
         if ( rc == -EAGAIN )
             rc = hypercall_create_continuation(
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index eb9e1a1..1228e52 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -467,10 +467,16 @@ static XSM_INLINE int xsm_pci_config_permission(XSM_DEFAULT_ARG struct domain *d
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_add_to_physmap(XSM_DEFAULT_ARG struct domain *d1, struct domain *d2)
+static XSM_INLINE int xsm_add_to_physmap(XSM_DEFAULT_ARG struct domain *d, struct domain *t, struct domain *f)
 {
+    int rc;
+
     XSM_ASSERT_ACTION(XSM_TARGET);
-    return xsm_default_action(action, d1, d2);
+    rc = xsm_default_action(action, d, t);
+    if ( f && !rc )
+        rc = xsm_default_action(action, d, f);
+
+    return rc;
 }
 
 static XSM_INLINE int xsm_remove_from_physmap(XSM_DEFAULT_ARG struct domain *d1, struct domain *d2)
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 1939453..9ee9543 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -90,7 +90,7 @@ struct xsm_operations {
     int (*memory_adjust_reservation) (struct domain *d1, struct domain *d2);
     int (*memory_stat_reservation) (struct domain *d1, struct domain *d2);
     int (*memory_pin_page) (struct domain *d1, struct domain *d2, struct page_info *page);
-    int (*add_to_physmap) (struct domain *d1, struct domain *d2);
+    int (*add_to_physmap) (struct domain *d, struct domain *t, struct domain *f);
     int (*remove_from_physmap) (struct domain *d1, struct domain *d2);
     int (*claim_pages) (struct domain *d);
 
@@ -344,9 +344,9 @@ static inline int xsm_memory_pin_page(xsm_default_t def, struct domain *d1, stru
     return xsm_ops->memory_pin_page(d1, d2, page);
 }
 
-static inline int xsm_add_to_physmap(xsm_default_t def, struct domain *d1, struct domain *d2)
+static inline int xsm_add_to_physmap(xsm_default_t def, struct domain *d, struct domain *t, struct domain *f)
 {
-    return xsm_ops->add_to_physmap(d1, d2);
+    return xsm_ops->add_to_physmap(d, t, f);
 }
 
 static inline int xsm_remove_from_physmap(xsm_default_t def, struct domain *d1, struct domain *d2)
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 7cdef04..81294b1 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1068,9 +1068,14 @@ static inline int flask_tmem_control(void)
     return domain_has_xen(current->domain, XEN__TMEM_CONTROL);
 }
 
-static int flask_add_to_physmap(struct domain *d1, struct domain *d2)
+static int flask_add_to_physmap(struct domain *d, struct domain *t, struct domain *f)
 {
-    return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PHYSMAP);
+    int rc;
+
+    rc = domain_has_perm(d, t, SECCLASS_MMU, MMU__PHYSMAP);
+    if ( f && !rc )
+        rc = domain_has_perm(d, f, SECCLASS_MMU, MMU__MAP_READ|MMU__MAP_WRITE);
+    return rc;
 }
 
 static int flask_remove_from_physmap(struct domain *d1, struct domain *d2)
-- 
1.7.2.3

* [V7 PATCH 6/7] pvh dom0: Add and remove foreign pages
  2013-12-17  2:38 [V7 PATCH 0/7]: PVH dom0 Mukesh Rathor
                   ` (4 preceding siblings ...)
  2013-12-17  2:38 ` [V7 PATCH 5/7] pvh: change xsm_add_to_physmap Mukesh Rathor
@ 2013-12-17  2:38 ` Mukesh Rathor
  2013-12-17  2:38 ` [V7 PATCH 7/7] pvh dom0: add opt_dom0pvh to setup.c Mukesh Rathor
  2013-12-17 14:46 ` [V7 PATCH 0/7]: PVH dom0 Konrad Rzeszutek Wilk
  7 siblings, 0 replies; 52+ messages in thread
From: Mukesh Rathor @ 2013-12-17  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, eddie.dong, keir.xen, tim, JBeulich

In this patch, a new function, p2m_add_foreign(), is added
to map pages from a foreign guest into the current dom0 for domU creation.
Such pages are typed p2m_map_foreign. Note, it is the nature of such
pages that a refcnt is held during their stay in the p2m. The refcnt is
added and released in the low-level ept code for convenience, and is
dropped again via the XENMEM_remove_from_physmap path. The cleanup of
foreign pages from the p2m upon its destruction is submitted
subsequently under a separate patch.
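
The refcounting contract, in short (a summary of the ept hunk below,
not new code):

    /* In atomic_write_ept_entry():
     *   installing a foreign entry  -> get_page(mfn_to_page(new->mfn), fdom)
     *   overwriting/clearing one    -> put_page(mfn_to_page(entryptr->mfn))
     * so a foreign page cannot be freed while it is mapped in this p2m. */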

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/mm.c         |   23 +++++++++----
 xen/arch/x86/mm/p2m-ept.c |   30 ++++++++++++++---
 xen/arch/x86/mm/p2m-pt.c  |    9 +++++-
 xen/arch/x86/mm/p2m.c     |   78 ++++++++++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/p2m.h |    3 ++
 5 files changed, 125 insertions(+), 18 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 0cae437..df90712 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2810,7 +2810,7 @@ static struct domain *get_pg_owner(domid_t domid)
         goto out;
     }
 
-    if ( unlikely(paging_mode_translate(curr)) )
+    if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
     {
         MEM_LOG("Cannot mix foreign mappings with translated domains");
         goto out;
@@ -4522,7 +4522,8 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
 
 static int xenmem_add_to_physmap_once(
     struct domain *d,
-    const struct xen_add_to_physmap *xatp)
+    const struct xen_add_to_physmap *xatp,
+    struct domain *fdom)
 {
     struct page_info *page = NULL;
     unsigned long gfn = 0; /* gcc ... */
@@ -4581,6 +4582,13 @@ static int xenmem_add_to_physmap_once(
             page = mfn_to_page(mfn);
             break;
         }
+
+        case XENMAPSPACE_gmfn_foreign:
+        {
+            rc = p2m_add_foreign(d, xatp->idx, xatp->gpfn, fdom);
+            return rc;
+        }
+
         default:
             break;
     }
@@ -4646,7 +4654,7 @@ static int xenmem_add_to_physmap(struct domain *d,
         start_xatp = *xatp;
         while ( xatp->size > 0 )
         {
-            rc = xenmem_add_to_physmap_once(d, xatp);
+            rc = xenmem_add_to_physmap_once(d, xatp, NULL);
             if ( rc < 0 )
                 break;
 
@@ -4672,11 +4680,12 @@ static int xenmem_add_to_physmap(struct domain *d,
         return rc;
     }
 
-    return xenmem_add_to_physmap_once(d, xatp);
+    return xenmem_add_to_physmap_once(d, xatp, NULL);
 }
 
 static int xenmem_add_to_physmap_range(struct domain *d,
-                                       struct xen_add_to_physmap_range *xatpr)
+                                       struct xen_add_to_physmap_range *xatpr,
+                                       struct domain *fdom)
 {
     /* Process entries in reverse order to allow continuations */
     while ( xatpr->size > 0 )
@@ -4693,7 +4702,7 @@ static int xenmem_add_to_physmap_range(struct domain *d,
         xatp.space = xatpr->space;
         xatp.idx = idx;
         xatp.gpfn = gpfn;
-        rc = xenmem_add_to_physmap_once(d, &xatp);
+        rc = xenmem_add_to_physmap_once(d, &xatp, fdom);
 
         if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
             return -EFAULT;
@@ -4780,7 +4789,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         }
         rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
         if ( rc == 0 )
-            rc = xenmem_add_to_physmap_range(d, &xatpr);
+            rc = xenmem_add_to_physmap_range(d, &xatpr, fd);
 
         rcu_unlock_domain(d);
         if ( fd )
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 08d1d72..0ba2365 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -36,8 +36,6 @@
 
 #define atomic_read_ept_entry(__pepte)                              \
     ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
-#define atomic_write_ept_entry(__pepte, __epte)                     \
-    write_atomic(&(__pepte)->epte, (__epte).epte)
 
 #define is_epte_present(ept_entry)      ((ept_entry)->epte & 0x7)
 #define is_epte_superpage(ept_entry)    ((ept_entry)->sp)
@@ -46,6 +44,26 @@ static inline bool_t is_epte_valid(ept_entry_t *e)
     return (e->epte != 0 && e->sa_p2mt != p2m_invalid);
 }
 
+static inline void atomic_write_ept_entry(ept_entry_t *entryptr,
+                                          const ept_entry_t *new)
+{
+    if ( p2m_is_foreign(new->sa_p2mt) )
+    {
+        struct page_info *page;
+        struct domain *fdom;
+
+        ASSERT(mfn_valid(new->mfn));
+        page = mfn_to_page(new->mfn);
+        fdom = page_get_owner(page);
+        get_page(page, fdom);
+    }
+    if ( p2m_is_foreign(entryptr->sa_p2mt) )
+        put_page(mfn_to_page(entryptr->mfn));
+
+    write_atomic(&entryptr->epte, new->epte);
+}
+
+
 static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type, p2m_access_t access)
 {
     /* First apply type permissions */
@@ -378,7 +396,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
             ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
         }
 
-        atomic_write_ept_entry(ept_entry, new_entry);
+        atomic_write_ept_entry(ept_entry, &new_entry);
     }
     else
     {
@@ -398,7 +416,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
 
         /* now install the newly split ept sub-tree */
         /* NB: please make sure domian is paused and no in-fly VT-d DMA. */
-        atomic_write_ept_entry(ept_entry, split_ept_entry);
+        atomic_write_ept_entry(ept_entry, &split_ept_entry);
 
         /* then move to the level we want to make real changes */
         for ( ; i > target; i-- )
@@ -428,7 +446,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
 
         ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
 
-        atomic_write_ept_entry(ept_entry, new_entry);
+        atomic_write_ept_entry(ept_entry, &new_entry);
     }
 
     /* Track the highest gfn for which we have ever had a valid mapping */
@@ -665,7 +683,7 @@ static void ept_change_entry_type_page(mfn_t ept_page_mfn, int ept_page_level,
 
             e.sa_p2mt = nt;
             ept_p2m_type_to_flags(&e, nt, e.access);
-            atomic_write_ept_entry(&epte[i], e);
+            atomic_write_ept_entry(&epte[i], &e);
         }
     }
 
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 09b60ce..766fd67 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -276,7 +276,7 @@ p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table,
     return 1;
 }
 
-// Returns 0 on error (out of memory)
+// Returns 0 on error (out of memory, or unimplemented p2m type)
 static int
 p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
               unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
@@ -312,6 +312,13 @@ p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
     }
 
+    if ( p2m_is_foreign(p2mt) )
+    {
+        /* pvh fixme: foreign types are only supported on ept at present */
+        gdprintk(XENLOG_WARNING, "Unimplemented foreign p2m type.\n");
+        return 0;
+    }
+
     if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
                          L4_PAGETABLE_SHIFT - PAGE_SHIFT,
                          L4_PAGETABLE_ENTRIES, PGT_l3_page_table) )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 0659ef1..43d1e2b 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -275,14 +275,19 @@ struct page_info *get_page_from_gfn_p2m(
         /* Fast path: look up and get out */
         p2m_read_lock(p2m);
         mfn = __get_gfn_type_access(p2m, gfn, t, a, 0, NULL, 0);
-        if ( (p2m_is_ram(*t) || p2m_is_grant(*t))
+        if ( (p2m_is_ram(*t) || p2m_is_grant(*t) || p2m_is_foreign(*t) )
              && mfn_valid(mfn)
              && !((q & P2M_UNSHARE) && p2m_is_shared(*t)) )
         {
             page = mfn_to_page(mfn);
-            if ( !get_page(page, d)
-                 /* Page could be shared */
-                 && !get_page(page, dom_cow) )
+            if ( p2m_is_foreign(*t) )
+            {
+                struct domain *fdom = page_get_owner_and_reference(page);
+                ASSERT(fdom && fdom != d);
+            }
+            else if ( !get_page(page, d)
+                      /* Page could be shared */
+                      && !get_page(page, dom_cow) )
                 page = NULL;
         }
         p2m_read_unlock(p2m);
@@ -1741,6 +1746,71 @@ out_p2m_audit:
 #endif /* P2M_AUDIT */
 
 /*
+ * Add frames from foreign domain to target domain's physmap. Similar to
+ * XENMAPSPACE_gmfn but the frame is foreign being mapped into current,
+ * and is not removed from foreign domain.
+ * Usage: libxl on pvh dom0 creating a guest and doing privcmd_ioctl_mmap.
+ * Side Effect: the mfn for fgfn will be refcounted in lower level routines
+ *              so it is not lost while mapped here. The refcnt is released
+ *              via the XENMEM_remove_from_physmap path.
+ * Returns: 0 ==> success
+ */
+int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
+                    unsigned long gpfn, struct domain *fdom)
+{
+    p2m_type_t p2mt, p2mt_prev;
+    mfn_t prev_mfn, mfn;
+    struct page_info *page;
+    int rc = 0;
+
+    if ( tdom == fdom || !tdom || !fdom || !is_pvh_domain(tdom) )
+        return -EINVAL;
+
+    /* following will take a refcnt on the mfn */
+    page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
+    if ( !page || !p2m_is_valid(p2mt) )
+    {
+        if ( page )
+            put_page(page);
+        return -EINVAL;
+    }
+    mfn = page_to_mfn(page);
+
+    /* Remove previously mapped page if it is present. */
+    prev_mfn = get_gfn(tdom, gpfn, &p2mt_prev);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(mfn_x(prev_mfn)) )
+            /* Xen heap frames are simply unhooked from this phys slot */
+            guest_physmap_remove_page(tdom, gpfn, mfn_x(prev_mfn), 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(tdom, gpfn);
+    }
+    /*
+     * Create the new mapping. Can't use guest_physmap_add_page() because it
+     * would update the m2p table, recording mfn -> gpfn of dom0 rather than
+     * mfn -> fgfn of domU.
+     */
+    if ( set_foreign_p2m_entry(tdom, gpfn, mfn) == 0 )
+    {
+        gdprintk(XENLOG_WARNING, "set_foreign_p2m_entry failed. "
+                 "gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
+                 gpfn, mfn_x(mfn), fgfn, tdom->domain_id, fdom->domain_id);
+        rc = -EINVAL;
+    }
+    put_page(page);
+
+    /*
+     * We must do this put_gfn after set_foreign_p2m_entry so another cpu
+     * doesn't populate the gpfn before us.
+     */
+    put_gfn(tdom, gpfn);
+
+    return rc;
+}
+
+/*
  * Local variables:
  * mode: C
  * c-file-style: "BSD"
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d5d6391..8548b4b 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -515,6 +515,9 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
 /* Set foreign mfn in the current guest's p2m table. */
 int set_foreign_p2m_entry(struct domain *domp, unsigned long gfn, mfn_t mfn);
 
+/* Add foreign mapping to the guest's p2m table. */
+int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
+                    unsigned long gpfn, struct domain *fdom);
 /* 
  * Populate-on-demand
  */
-- 
1.7.2.3
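
To make the intended usage of p2m_add_foreign() concrete, here is a
caller-side sketch. It is illustrative only - domu_id, fgfn, gpfn and err
are hypothetical locals, and none of this is part of the patch - showing
how a pvh dom0 kernel could map a single foreign gfn via
XENMEM_add_to_physmap_range, the path that ends up in p2m_add_foreign():

    xen_ulong_t fgfn = 0x1234;  /* gfn in the foreign (domU) physmap */
    xen_pfn_t gpfn = 0x80000;   /* free slot in dom0's own physmap */
    int err = 0, rc;
    struct xen_add_to_physmap_range xatpr = {
        .domid         = DOMID_SELF,               /* target: dom0 itself */
        .space         = XENMAPSPACE_gmfn_foreign, /* frames are foreign */
        .foreign_domid = domu_id,                  /* guest being built */
        .size          = 1,
    };

    set_xen_guest_handle(xatpr.idxs, &fgfn);
    set_xen_guest_handle(xatpr.gpfns, &gpfn);
    set_xen_guest_handle(xatpr.errs, &err);
    rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatpr);

On success the foreign frame stays refcounted until it is unmapped again
via the XENMEM_remove_from_physmap path, per the comment block above.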

^ permalink raw reply related	[flat|nested] 52+ messages in thread

* [V7 PATCH 7/7] pvh dom0: add opt_dom0pvh to setup.c
  2013-12-17  2:38 [V7 PATCH 0/7]: PVH dom0 Mukesh Rathor
                   ` (5 preceding siblings ...)
  2013-12-17  2:38 ` [V7 PATCH 6/7] pvh dom0: Add and remove foreign pages Mukesh Rathor
@ 2013-12-17  2:38 ` Mukesh Rathor
  2013-12-17 14:46 ` [V7 PATCH 0/7]: PVH dom0 Konrad Rzeszutek Wilk
  7 siblings, 0 replies; 52+ messages in thread
From: Mukesh Rathor @ 2013-12-17  2:38 UTC (permalink / raw)
  To: Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/setup.c |   12 +++++++++---
 1 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 82376d3..3282fad 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -68,6 +68,10 @@ integer_param("maxcpus", max_cpus);
 static bool_t __initdata disable_smep;
 invbool_param("smep", disable_smep);
 
+/* Boot dom0 in pvh mode */
+bool_t __initdata opt_dom0pvh;
+boolean_param("dom0pvh", opt_dom0pvh);
+
 /* **** Linux config option: propagated to domain0. */
 /* "acpi=off":    Sisables both ACPI table parsing and interpreter. */
 /* "acpi=force":  Override the disable blacklist.                   */
@@ -552,7 +556,7 @@ void __init __start_xen(unsigned long mbi_p)
 {
     char *memmap_type = NULL;
     char *cmdline, *kextra, *loader;
-    unsigned int initrdidx;
+    unsigned int initrdidx, domcr_flags = 0;
     multiboot_info_t *mbi = __va(mbi_p);
     module_t *mod = (module_t *)__va(mbi->mods_addr);
     unsigned long nr_pages, raw_max_page, modules_headroom, *module_map;
@@ -1342,8 +1346,10 @@ void __init __start_xen(unsigned long mbi_p)
     if ( !tboot_protect_mem_regions() )
         panic("Could not protect TXT memory regions");
 
-    /* Create initial domain 0. */
-    dom0 = domain_create(0, DOMCRF_s3_integrity, 0);
+    /* Create initial domain 0. */
+    domcr_flags = (opt_dom0pvh ? DOMCRF_pvh | DOMCRF_hap : 0);
+    domcr_flags |= DOMCRF_s3_integrity;
+    dom0 = domain_create(0, domcr_flags, 0);
     if ( IS_ERR(dom0) || (alloc_dom0_vcpu0() == NULL) )
         panic("Error creating domain 0");
 
-- 
1.7.2.3
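
Since boolean_param() registers "dom0pvh" as a standard Xen command line
boolean, enabling it is just a matter of appending the option to the
hypervisor line. An illustrative grub.cfg fragment (the paths and the
other options are placeholders, not taken from this series):

    multiboot /boot/xen.gz dom0pvh dom0_mem=1024M
    module    /boot/vmlinuz-3.12 root=/dev/xvda1 ro
    module    /boot/initrd-3.12.img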

^ permalink raw reply related	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2013-12-17  2:38 ` [V7 PATCH 5/7] pvh: change xsm_add_to_physmap Mukesh Rathor
@ 2013-12-17  8:32   ` Jan Beulich
  2013-12-18  0:19     ` Mukesh Rathor
  2013-12-19 15:50   ` Daniel De Graaf
                     ` (2 subsequent siblings)
  3 siblings, 1 reply; 52+ messages in thread
From: Jan Beulich @ 2013-12-17  8:32 UTC (permalink / raw)
  To: Mukesh Rathor; +Cc: george.dunlap, xen-devel, dgdegra, keir.xen, tim

>>> On 17.12.13 at 03:38, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> In preparation for the next patch, we update xsm_add_to_physmap to
> allow for checking of foreign domain. Thus, the current domain must
> have the right to update the mappings of target domain with pages from
> foreign domain.
> 
> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
> ---
>  xen/arch/arm/mm.c       |    4 ++--
>  xen/arch/x86/mm.c       |   18 +++++++++++++++---
>  xen/include/xsm/dummy.h |   10 ++++++++--
>  xen/include/xsm/xsm.h   |    6 +++---
>  xen/xsm/flask/hooks.c   |    9 +++++++--
>  5 files changed, 35 insertions(+), 12 deletions(-)

This one continues to lack an ack from Daniel, and you continue to
fail to Cc him...

Jan

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-17  2:38 ` [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86 Mukesh Rathor
@ 2013-12-17 13:07   ` Jan Beulich
  2013-12-17 13:59     ` Ian Campbell
  2013-12-17 23:57     ` Mukesh Rathor
  2013-12-17 16:56   ` Jan Beulich
  1 sibling, 2 replies; 52+ messages in thread
From: Jan Beulich @ 2013-12-17 13:07 UTC (permalink / raw)
  To: Mukesh Rathor; +Cc: george.dunlap, xen-devel, keir.xen, tim

>>> On 17.12.13 at 03:38, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -4675,6 +4675,39 @@ static int xenmem_add_to_physmap(struct domain *d,
>      return xenmem_add_to_physmap_once(d, xatp);
>  }
>  
> +static int xenmem_add_to_physmap_range(struct domain *d,
> +                                       struct xen_add_to_physmap_range *xatpr)
> +{
> +    /* Process entries in reverse order to allow continuations */
> +    while ( xatpr->size > 0 )
> +    {
> +        int rc;
> +        xen_ulong_t idx;
> +        xen_pfn_t gpfn;
> +        struct xen_add_to_physmap xatp;
> +
> +        if ( copy_from_guest_offset(&idx, xatpr->idxs, xatpr->size-1, 1)  ||
> +             copy_from_guest_offset(&gpfn, xatpr->gpfns, xatpr->size-1, 1) )
> +            return -EFAULT;
> +
> +        xatp.space = xatpr->space;
> +        xatp.idx = idx;
> +        xatp.gpfn = gpfn;
> +        rc = xenmem_add_to_physmap_once(d, &xatp);
> +
> +        if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
> +            return -EFAULT;
> +
> +        xatpr->size--;
> +
> +        /* Check for continuation if it's not the last iteration */
> +        if ( xatpr->size > 0 && hypercall_preempt_check() )
> +            return -EAGAIN;
> +    }
> +
> +    return 0;
> +}

Now that I started looking into creating the compat wrapper for this,
I realize that processing this request backwards is wrong: The effect
of the entire hypercall can end up being different between forward
and reverse processing, if an index or gpfn happens to be twice in
the set. While that's perhaps not the usual thing to do, you never
know how a creative user of the interface may find it useful to do so.
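
(A purely illustrative example of the difference: with size = 2,
idxs = { i1, i2 } and gpfns = { g, g }, forward processing leaves the
frame for i2 mapped at g, while reverse processing leaves the one for
i1 there - an observably different final p2m.)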

Jan

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-17 13:07   ` Jan Beulich
@ 2013-12-17 13:59     ` Ian Campbell
  2013-12-17 14:36       ` Jan Beulich
  2013-12-17 23:57     ` Mukesh Rathor
  1 sibling, 1 reply; 52+ messages in thread
From: Ian Campbell @ 2013-12-17 13:59 UTC (permalink / raw)
  To: Jan Beulich; +Cc: george.dunlap, xen-devel, keir.xen, tim

On Tue, 2013-12-17 at 13:07 +0000, Jan Beulich wrote:
> >>> On 17.12.13 at 03:38, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> > --- a/xen/arch/x86/mm.c
> > +++ b/xen/arch/x86/mm.c
> > @@ -4675,6 +4675,39 @@ static int xenmem_add_to_physmap(struct domain *d,
> >      return xenmem_add_to_physmap_once(d, xatp);
> >  }
> >  
> > +static int xenmem_add_to_physmap_range(struct domain *d,
> > +                                       struct xen_add_to_physmap_range *xatpr)
> > +{
> > +    /* Process entries in reverse order to allow continuations */
> > +    while ( xatpr->size > 0 )
> > +    {
> > +        int rc;
> > +        xen_ulong_t idx;
> > +        xen_pfn_t gpfn;
> > +        struct xen_add_to_physmap xatp;
> > +
> > +        if ( copy_from_guest_offset(&idx, xatpr->idxs, xatpr->size-1, 1)  ||
> > +             copy_from_guest_offset(&gpfn, xatpr->gpfns, xatpr->size-1, 1) )
> > +            return -EFAULT;
> > +
> > +        xatp.space = xatpr->space;
> > +        xatp.idx = idx;
> > +        xatp.gpfn = gpfn;
> > +        rc = xenmem_add_to_physmap_once(d, &xatp);
> > +
> > +        if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
> > +            return -EFAULT;
> > +
> > +        xatpr->size--;
> > +
> > +        /* Check for continuation if it's not the last iteration */
> > +        if ( xatpr->size > 0 && hypercall_preempt_check() )
> > +            return -EAGAIN;
> > +    }
> > +
> > +    return 0;
> > +}
> 
> Now that I started looking into creating the compat wrapper for this,
> I realize that processing this request backwards is wrong: The effect
> of the entire hypercall can end up being different between forward
> and reverse processing, if an index or gpfn happens to be twice in
> the set. While that's perhaps not the usual thing to do, you never
> know how a creative user of the interface may find it useful to do so.

Hrm, the ARM code does things this way as well. But you are of course
right.

We could change the code but we could also tighten the interface
requirements, either by explicit specifying that the range is handled in
reverse order or by mandating that index/gpfn must not be repeated
(whether or not we actively try and detect such cases).

Ian.

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-17 13:59     ` Ian Campbell
@ 2013-12-17 14:36       ` Jan Beulich
  2013-12-17 14:40         ` Ian Campbell
  0 siblings, 1 reply; 52+ messages in thread
From: Jan Beulich @ 2013-12-17 14:36 UTC (permalink / raw)
  To: Ian Campbell; +Cc: george.dunlap, xen-devel, keir.xen, tim

>>> On 17.12.13 at 14:59, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> On Tue, 2013-12-17 at 13:07 +0000, Jan Beulich wrote:
>> >>> On 17.12.13 at 03:38, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
>> > --- a/xen/arch/x86/mm.c
>> > +++ b/xen/arch/x86/mm.c
>> > @@ -4675,6 +4675,39 @@ static int xenmem_add_to_physmap(struct domain *d,
>> >      return xenmem_add_to_physmap_once(d, xatp);
>> >  }
>> >  
>> > +static int xenmem_add_to_physmap_range(struct domain *d,
>> > +                                       struct xen_add_to_physmap_range 
> *xatpr)
>> > +{
>> > +    /* Process entries in reverse order to allow continuations */
>> > +    while ( xatpr->size > 0 )
>> > +    {
>> > +        int rc;
>> > +        xen_ulong_t idx;
>> > +        xen_pfn_t gpfn;
>> > +        struct xen_add_to_physmap xatp;
>> > +
>> > +        if ( copy_from_guest_offset(&idx, xatpr->idxs, xatpr->size-1, 1)  ||
>> > +             copy_from_guest_offset(&gpfn, xatpr->gpfns, xatpr->size-1, 1) )
>> > +            return -EFAULT;
>> > +
>> > +        xatp.space = xatpr->space;
>> > +        xatp.idx = idx;
>> > +        xatp.gpfn = gpfn;
>> > +        rc = xenmem_add_to_physmap_once(d, &xatp);
>> > +
>> > +        if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
>> > +            return -EFAULT;
>> > +
>> > +        xatpr->size--;
>> > +
>> > +        /* Check for continuation if it's not the last iteration */
>> > +        if ( xatpr->size > 0 && hypercall_preempt_check() )
>> > +            return -EAGAIN;
>> > +    }
>> > +
>> > +    return 0;
>> > +}
>> 
>> Now that I started looking into creating the compat wrapper for this,
>> I realize that processing this request backwards is wrong: The effect
>> of the entire hypercall can end up being different between forward
>> and reverse processing, if an index or gpfn happens to be twice in
>> the set. While that's perhaps not the usual thing to do, you never
>> know how a creative user of the interface may find it useful to do so.
> 
> Hrm, the ARM code does things this way as well. But you are of course
> right.
> 
> We could change the code but we could also tighten the interface
> requirements, either by explicit specifying that the range is handled in
> reverse order or by mandating that index/gpfn must not be repeated
> (whether or not we actively try and detect such cases).

Specifying that this gets processed backwards would be, well,
backwards. Requiring no duplicates (or else getting undefined
behavior) would be possible. But processing the operation in the
conventional order doesn't seem all that hard.

Jan

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-17 14:36       ` Jan Beulich
@ 2013-12-17 14:40         ` Ian Campbell
  2013-12-17 15:11           ` Jan Beulich
  0 siblings, 1 reply; 52+ messages in thread
From: Ian Campbell @ 2013-12-17 14:40 UTC (permalink / raw)
  To: Jan Beulich; +Cc: george.dunlap, xen-devel, keir.xen, tim

On Tue, 2013-12-17 at 14:36 +0000, Jan Beulich wrote:
> >>> On 17.12.13 at 14:59, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> > On Tue, 2013-12-17 at 13:07 +0000, Jan Beulich wrote:
> >> >>> On 17.12.13 at 03:38, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> >> > --- a/xen/arch/x86/mm.c
> >> > +++ b/xen/arch/x86/mm.c
> >> > @@ -4675,6 +4675,39 @@ static int xenmem_add_to_physmap(struct domain *d,
> >> >      return xenmem_add_to_physmap_once(d, xatp);
> >> >  }
> >> >  
> >> > +static int xenmem_add_to_physmap_range(struct domain *d,
> >> > +                                       struct xen_add_to_physmap_range 
> > *xatpr)
> >> > +{
> >> > +    /* Process entries in reverse order to allow continuations */
> >> > +    while ( xatpr->size > 0 )
> >> > +    {
> >> > +        int rc;
> >> > +        xen_ulong_t idx;
> >> > +        xen_pfn_t gpfn;
> >> > +        struct xen_add_to_physmap xatp;
> >> > +
> >> > +        if ( copy_from_guest_offset(&idx, xatpr->idxs, xatpr->size-1, 1)  ||
> >> > +             copy_from_guest_offset(&gpfn, xatpr->gpfns, xatpr->size-1, 1) )
> >> > +            return -EFAULT;
> >> > +
> >> > +        xatp.space = xatpr->space;
> >> > +        xatp.idx = idx;
> >> > +        xatp.gpfn = gpfn;
> >> > +        rc = xenmem_add_to_physmap_once(d, &xatp);
> >> > +
> >> > +        if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1) )
> >> > +            return -EFAULT;
> >> > +
> >> > +        xatpr->size--;
> >> > +
> >> > +        /* Check for continuation if it's not the last iteration
> >> > +        if ( xatpr->size > 0 && hypercall_preempt_check() )
> >> > +            return -EAGAIN;
> >> > +    }
> >> > +
> >> > +    return 0;
> >> > +}
> >> 
> >> Now that I started looking into creating the compat wrapper for this,
> >> I realize that processing this request backwards is wrong: The effect
> >> of the entire hypercall can end up being different between forward
> >> and reverse processing, if an index or gpfn happens to be twice in
> >> the set. While that's perhaps not the usual thing to do, you never
> >> know how a creative user of the interface may find it useful to do so.
> > 
> > Hrm, the ARM code does things this way as well. But you are of course
> > right.
> > 
> > We could change the code but we could also tighten the interface
> > requirements, either by explicit specifying that the range is handled in
> > reverse order or by mandating that index/gpfn must not be repeated
> > (whether or not we actively try and detect such cases).
> 
> Specifying that this gets processed backwards would be, well,
> backwards. Requiring no duplicates (or else getting undefined
> behavior) would be possible. But processing the operation in the
> conventional order doesn't seem all that hard.

The reason I thought it would be tricky was finding somewhere to stash
the progress over the continuation. Do you have a cunning plan?

In principal the ABI on ARM is still subject to change (until 4.4) and
on x86 it does exist yet but TBH I'd rather find a way not too, either a
clever continuation trick or adding restrictions to the call.

Ian.

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 0/7]: PVH dom0....
  2013-12-17  2:38 [V7 PATCH 0/7]: PVH dom0 Mukesh Rathor
                   ` (6 preceding siblings ...)
  2013-12-17  2:38 ` [V7 PATCH 7/7] pvh dom0: add opt_dom0pvh to setup.c Mukesh Rathor
@ 2013-12-17 14:46 ` Konrad Rzeszutek Wilk
  2013-12-18  0:14   ` Mukesh Rathor
  7 siblings, 1 reply; 52+ messages in thread
From: Konrad Rzeszutek Wilk @ 2013-12-17 14:46 UTC (permalink / raw)
  To: Mukesh Rathor; +Cc: george.dunlap, Xen-devel, tim, keir.xen, JBeulich

On Mon, Dec 16, 2013 at 06:38:24PM -0800, Mukesh Rathor wrote:
> Hi,
> 
> V7: The only change from V6 is in patch #6:
>       - remove p2m_rem_foreign: this code can be just skipped now as the
>         most critical thing to release refcnt has been moved to ept
>       - fixup get_page_from_gfn so that it returns refcnt for foreign 
>         pages also, altho this is redundant, as a refcnt is already held.
>         But this simplifies code and helps ARM also.
> 
> 
> These patches are based on c/s: e423b5cd60ff95ba3680e2e4a8440d4d19b2b13e

Could you also put it on your git tree please?

Thank you!
> 
> thanks
> Mukesh
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-17 14:40         ` Ian Campbell
@ 2013-12-17 15:11           ` Jan Beulich
  2013-12-17 15:34             ` Ian Campbell
  2013-12-18  7:55             ` Jan Beulich
  0 siblings, 2 replies; 52+ messages in thread
From: Jan Beulich @ 2013-12-17 15:11 UTC (permalink / raw)
  To: Ian Campbell; +Cc: george.dunlap, xen-devel, keir.xen, tim

>>> On 17.12.13 at 15:40, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> On Tue, 2013-12-17 at 14:36 +0000, Jan Beulich wrote:
>> >>> On 17.12.13 at 14:59, Ian Campbell <Ian.Campbell@citrix.com> wrote:
>> > We could change the code but we could also tighten the interface
>> > requirements, either by explicit specifying that the range is handled in
>> > reverse order or by mandating that index/gpfn must not be repeated
>> > (whether or not we actively try and detect such cases).
>> 
>> Specifying that this gets processed backwards would be, well,
>> backwards. Requiring no duplicates (or else getting undefined
>> behavior) would be possible. But processing the operation in the
>> conventional order doesn't seem all that hard.
> 
> The reason I thought it would be tricky was finding somewhere to stash
> the progress over the continuation. Do you have a cunning plan?

Just like we do in other cases - in the struct that was passed to
us by the caller (incrementing the handles and decrementing the
count as needed).

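Something like the below (an untested sketch to illustrate the shape,
not a proposed patch) ought to do - processing forward, with the resume
state kept in the caller's struct by advancing the handles:

    while ( xatpr->size > 0 )
    {
        int rc;
        xen_ulong_t idx;
        xen_pfn_t gpfn;
        struct xen_add_to_physmap xatp;

        /* The handles already point at the next unprocessed element. */
        if ( copy_from_guest(&idx, xatpr->idxs, 1) ||
             copy_from_guest(&gpfn, xatpr->gpfns, 1) )
            return -EFAULT;

        xatp.space = xatpr->space;
        xatp.idx = idx;
        xatp.gpfn = gpfn;
        rc = xenmem_add_to_physmap_once(d, &xatp);

        if ( copy_to_guest(xatpr->errs, &rc, 1) )
            return -EFAULT;

        /* Record progress in the caller's struct. */
        guest_handle_add_offset(xatpr->idxs, 1);
        guest_handle_add_offset(xatpr->gpfns, 1);
        guest_handle_add_offset(xatpr->errs, 1);
        xatpr->size--;

        if ( xatpr->size > 0 && hypercall_preempt_check() )
            return -EAGAIN; /* caller must copy xatpr back to the guest */
    }
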
Jan

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-17 15:11           ` Jan Beulich
@ 2013-12-17 15:34             ` Ian Campbell
  2013-12-18  7:55             ` Jan Beulich
  1 sibling, 0 replies; 52+ messages in thread
From: Ian Campbell @ 2013-12-17 15:34 UTC (permalink / raw)
  To: Jan Beulich; +Cc: george.dunlap, xen-devel, keir.xen, tim

On Tue, 2013-12-17 at 15:11 +0000, Jan Beulich wrote:
> >>> On 17.12.13 at 15:40, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> > On Tue, 2013-12-17 at 14:36 +0000, Jan Beulich wrote:
> >> >>> On 17.12.13 at 14:59, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> >> > We could change the code but we could also tighten the interface
> >> > requirements, either by explicit specifying that the range is handled in
> >> > reverse order or by mandating that index/gpfn must not be repeated
> >> > (whether or not we actively try and detect such cases).
> >> 
> >> Specifying that this gets processed backwards would be, well,
> >> backwards. Requiring no duplicates (or else getting undefined
> >> behavior) would be possible. But processing the operation in the
> >> conventional order doesn't seem all that hard.
> > 
> > The reason I thought it would be tricky was finding somewhere to stash
> > the progress over the continuation. Do you have a cunning plan?
> 
> Just like we do in other cases - in the struct that was passed to
> us by the caller (incrementing the handles and decrementing the
> count as needed).

Ah right, forgot we were allowed to fiddle with the handle...

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-17  2:38 ` [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86 Mukesh Rathor
  2013-12-17 13:07   ` Jan Beulich
@ 2013-12-17 16:56   ` Jan Beulich
  1 sibling, 0 replies; 52+ messages in thread
From: Jan Beulich @ 2013-12-17 16:56 UTC (permalink / raw)
  To: Mukesh Rathor; +Cc: george.dunlap, xen-devel, keir.xen, tim

>>> On 17.12.13 at 03:38, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> +    case XENMEM_add_to_physmap_range:
> +    {
> +        struct xen_add_to_physmap_range xatpr;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&xatpr, arg, 1) )
> +            return -EFAULT;
> +
> +        /* This mapspace is redundant for this hypercall */
> +        if ( xatpr.space == XENMAPSPACE_gmfn_range )
> +            return -EINVAL;
> +
> +        d = rcu_lock_domain_by_any_id(xatpr.domid);
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        if ( (rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d)) == 0 )
> +            rc = xenmem_add_to_physmap_range(d, &xatpr);
> +
> +        rcu_unlock_domain(d);
> +
> +        if ( rc == -EAGAIN )
> +            rc = hypercall_create_continuation(
> +                __HYPERVISOR_memory_op, "ih", op, arg);

There's another bug here: You don't copy back xatpr, and hence
when the hypercall gets resumed, you'll again find the original
size rather than the reduced one.

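(Illustratively - a sketch only, not a tested change - the -EAGAIN path
would need something along the lines of

    if ( rc == -EAGAIN )
    {
        if ( __copy_to_guest(arg, &xatpr, 1) )
            return -EFAULT;
        rc = hypercall_create_continuation(
            __HYPERVISOR_memory_op, "ih", op, arg);
    }

so that the resumed invocation picks up the reduced size.)
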
Jan

> +
> +        return rc;
> +    }

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-17 13:07   ` Jan Beulich
  2013-12-17 13:59     ` Ian Campbell
@ 2013-12-17 23:57     ` Mukesh Rathor
  2013-12-18 10:00       ` Ian Campbell
  1 sibling, 1 reply; 52+ messages in thread
From: Mukesh Rathor @ 2013-12-17 23:57 UTC (permalink / raw)
  To: Jan Beulich; +Cc: george.dunlap, xen-devel, keir.xen, tim

On Tue, 17 Dec 2013 13:07:22 +0000
"Jan Beulich" <JBeulich@suse.com> wrote:

> >>> On 17.12.13 at 03:38, Mukesh Rathor <mukesh.rathor@oracle.com>
> >>> wrote:
> > --- a/xen/arch/x86/mm.c
> > +++ b/xen/arch/x86/mm.c
> > @@ -4675,6 +4675,39 @@ static int xenmem_add_to_physmap(struct
> > domain *d, return xenmem_add_to_physmap_once(d, xatp);
> >  }
> >  
> > +static int xenmem_add_to_physmap_range(struct domain *d,
> > +                                       struct
> > xen_add_to_physmap_range *xatpr) +{
> > +    /* Process entries in reverse order to allow continuations */
> > +    while ( xatpr->size > 0 )
> > +    {
> > +        int rc;
> > +        xen_ulong_t idx;
> > +        xen_pfn_t gpfn;
> > +        struct xen_add_to_physmap xatp;
> > +
> > +        if ( copy_from_guest_offset(&idx, xatpr->idxs,
> > xatpr->size-1, 1)  ||
> > +             copy_from_guest_offset(&gpfn, xatpr->gpfns,
> > xatpr->size-1, 1) )
> > +            return -EFAULT;
> > +
> > +        xatp.space = xatpr->space;
> > +        xatp.idx = idx;
> > +        xatp.gpfn = gpfn;
> > +        rc = xenmem_add_to_physmap_once(d, &xatp);
> > +
> > +        if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc,
> > 1) )
> > +            return -EFAULT;
> > +
> > +        xatpr->size--;
> > +
> > +        /* Check for continuation if it's not the last iteration
> > */
> > +        if ( xatpr->size > 0 && hypercall_preempt_check() )
> > +            return -EAGAIN;
> > +    }
> > +
> > +    return 0;
> > +}
> 
> Now that I started looking into creating the compat wrapper for this,
> I realize that processing this request backwards is wrong: The effect
> of the entire hypercall can end up being different between forward
> and reverse processing, if an index or gpfn happens to be twice in
> the set. While that's perhaps not the usual thing to do, you never
> know how a creative user of the interface may find it useful to do so.

IIRC, I had gotten that from the arm guys.... it seems plausible to say
the mappings are unpredictable in case of repeated idx/gpfn, but I'll
see what they are doing to fix it, or just change it myself here.

thanks for noticing,
Mukesh

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 0/7]: PVH dom0....
  2013-12-17 14:46 ` [V7 PATCH 0/7]: PVH dom0 Konrad Rzeszutek Wilk
@ 2013-12-18  0:14   ` Mukesh Rathor
  0 siblings, 0 replies; 52+ messages in thread
From: Mukesh Rathor @ 2013-12-18  0:14 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk; +Cc: george.dunlap, Xen-devel, tim, keir.xen, JBeulich

On Tue, 17 Dec 2013 09:46:57 -0500
Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote:

> On Mon, Dec 16, 2013 at 06:38:24PM -0800, Mukesh Rathor wrote:
> > Hi,
> > 
> > V7: The only change from V6 is in patch #6:
> >       - remove p2m_rem_foreign: this code can be just skipped now
> > as the most critical thing to release refcnt has been moved to ept
> >       - fixup get_page_from_gfn so that it returns refcnt for
> > foreign pages also, altho this is redundant, as a refcnt is already
> > held. But this simplifies code and helps ARM also.
> > 
> > 
> > These patches are based on c/s:
> > e423b5cd60ff95ba3680e2e4a8440d4d19b2b13e
> 
> Could you also put it on your git tree please?

git://oss.oracle.com/git/mrathor/xen.git  branch: dom0pvh-v7

thanks
Mukesh

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2013-12-17  8:32   ` Jan Beulich
@ 2013-12-18  0:19     ` Mukesh Rathor
  2013-12-18  8:07       ` Jan Beulich
  0 siblings, 1 reply; 52+ messages in thread
From: Mukesh Rathor @ 2013-12-18  0:19 UTC (permalink / raw)
  To: Jan Beulich; +Cc: george.dunlap, xen-devel, dgdegra, keir.xen, tim

On Tue, 17 Dec 2013 08:32:41 +0000
"Jan Beulich" <JBeulich@suse.com> wrote:

> >>> On 17.12.13 at 03:38, Mukesh Rathor <mukesh.rathor@oracle.com>
> >>> wrote:
> > In preparation for the next patch, we update xsm_add_to_physmap to
> > allow for checking of foreign domain. Thus, the current domain must
> > have the right to update the mappings of target domain with pages
> > from foreign domain.
> > 
> > Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
> > ---
> >  xen/arch/arm/mm.c       |    4 ++--
> >  xen/arch/x86/mm.c       |   18 +++++++++++++++---
> >  xen/include/xsm/dummy.h |   10 ++++++++--
> >  xen/include/xsm/xsm.h   |    6 +++---
> >  xen/xsm/flask/hooks.c   |    9 +++++++--
> >  5 files changed, 35 insertions(+), 12 deletions(-)
> 
> This one continues to lack an ack from Daniel, and you continue to
> fail to Cc him...

That is incorrect. He was CC'd in the last few versions since the
fix, and I had also ping'd him in the cover letter. Either he's on
holiday or ... no comment (he does work for the n-s-a :-) ).... 

thanks
Mukesh

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-17 15:11           ` Jan Beulich
  2013-12-17 15:34             ` Ian Campbell
@ 2013-12-18  7:55             ` Jan Beulich
  2013-12-18 10:07               ` Ian Campbell
  1 sibling, 1 reply; 52+ messages in thread
From: Jan Beulich @ 2013-12-18  7:55 UTC (permalink / raw)
  To: Ian Campbell, Mukesh Rathor, Keir Fraser; +Cc: george.dunlap, xen-devel, tim

>>> On 17.12.13 at 16:11, "Jan Beulich" <JBeulich@suse.com> wrote:
>>>> On 17.12.13 at 15:40, Ian Campbell <Ian.Campbell@citrix.com> wrote:
>> On Tue, 2013-12-17 at 14:36 +0000, Jan Beulich wrote:
>>> >>> On 17.12.13 at 14:59, Ian Campbell <Ian.Campbell@citrix.com> wrote:
>>> > We could change the code but we could also tighten the interface
>>> > requirements, either by explicit specifying that the range is handled in
>>> > reverse order or by mandating that index/gpfn must not be repeated
>>> > (whether or not we actively try and detect such cases).
>>> 
>>> Specifying that this gets processed backwards would be, well,
>>> backwards. Requiring no duplicates (or else getting undefined
>>> behavior) would be possible. But processing the operation in the
>>> conventional order doesn't seem all that hard.
>> 
>> The reason I thought it would be tricky was finding somewhere to stash
>> the progress over the continuation. Do you have a cunning plan?
> 
> Just like we do in other cases - in the struct that was passed to
> us by the caller (incrementing the handles and decrementing the
> count as needed).

And I was wrong with this - there's no proper precedent of us
modifying hypercall interface structures except where certain
fields are specified to be outputs.

For XENMEM_add_to_physmap_range none of the fields is, so
even copying back the size field (like currently done on ARM,
and like also done for XENMEM_add_to_physmap's
XENMAPSPACE_gmfn_range sub-case) isn't really correct.
Instead the general method for encoding the continuation
point in mem-ops is to put the resume index in the high bits of
the first hypercall argument.

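(For illustration, that convention - abbreviated and from memory, so
treat it as a sketch rather than verbatim xen/common/memory.c, with
'done' a hypothetical count of extents already handled - looks like:

    unsigned long start_extent = cmd >> MEMOP_EXTENT_SHIFT;
    int op = cmd & MEMOP_CMD_MASK;
    ...
    /* On preemption, fold the progress into the command argument. */
    rc = hypercall_create_continuation(
        __HYPERVISOR_memory_op, "lh",
        op | (done << MEMOP_EXTENT_SHIFT), arg);

i.e. no interface structure field gets written at all.)
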
Whether we want to change the specification here (clarifying
that all of the structure may be modified by the hypervisor in
the course of executing the hypercall) instead of fixing the
implementation is open for discussion.

Jan

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2013-12-18  0:19     ` Mukesh Rathor
@ 2013-12-18  8:07       ` Jan Beulich
  0 siblings, 0 replies; 52+ messages in thread
From: Jan Beulich @ 2013-12-18  8:07 UTC (permalink / raw)
  To: Mukesh Rathor; +Cc: george.dunlap, xen-devel, dgdegra, keir.xen, tim

>>> On 18.12.13 at 01:19, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> On Tue, 17 Dec 2013 08:32:41 +0000
> "Jan Beulich" <JBeulich@suse.com> wrote:
> 
>> >>> On 17.12.13 at 03:38, Mukesh Rathor <mukesh.rathor@oracle.com>
>> >>> wrote:
>> > In preparation for the next patch, we update xsm_add_to_physmap to
>> > allow for checking of foreign domain. Thus, the current domain must
>> > have the right to update the mappings of target domain with pages
>> > from foreign domain.
>> > 
>> > Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
>> > ---
>> >  xen/arch/arm/mm.c       |    4 ++--
>> >  xen/arch/x86/mm.c       |   18 +++++++++++++++---
>> >  xen/include/xsm/dummy.h |   10 ++++++++--
>> >  xen/include/xsm/xsm.h   |    6 +++---
>> >  xen/xsm/flask/hooks.c   |    9 +++++++--
>> >  5 files changed, 35 insertions(+), 12 deletions(-)
>> 
>> This one continues to lack an ack from Daniel, and you continue to
>> fail to Cc him...
> 
> That is incorrect. He was CC'd in the last few versions since the
> fix, and I had also ping'd him in the cover letter. Either he's on
> holiday or ... no comment (he does work for the n-s-a :-) ).... 

Cc-ing him on the cover letter is insufficient (and I'd personally
even consider that sort of thing spam) - he needs to be Cc-ed on the
actual patch that you want him to approve. And that's
independent of any (unfortunate) need for pings or the sending
of earlier revisions.

Jan

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-17 23:57     ` Mukesh Rathor
@ 2013-12-18 10:00       ` Ian Campbell
  0 siblings, 0 replies; 52+ messages in thread
From: Ian Campbell @ 2013-12-18 10:00 UTC (permalink / raw)
  To: Mukesh Rathor; +Cc: george.dunlap, xen-devel, keir.xen, tim, Jan Beulich

On Tue, 2013-12-17 at 15:57 -0800, Mukesh Rathor wrote:
> On Tue, 17 Dec 2013 13:07:22 +0000
> "Jan Beulich" <JBeulich@suse.com> wrote:
> 
> > >>> On 17.12.13 at 03:38, Mukesh Rathor <mukesh.rathor@oracle.com>
> > >>> wrote:
> > > --- a/xen/arch/x86/mm.c
> > > +++ b/xen/arch/x86/mm.c
> > > @@ -4675,6 +4675,39 @@ static int xenmem_add_to_physmap(struct
> > > domain *d, return xenmem_add_to_physmap_once(d, xatp);
> > >  }
> > >  
> > > +static int xenmem_add_to_physmap_range(struct domain *d,
> > > +                                       struct
> > > xen_add_to_physmap_range *xatpr) +{
> > > +    /* Process entries in reverse order to allow continuations */
> > > +    while ( xatpr->size > 0 )
> > > +    {
> > > +        int rc;
> > > +        xen_ulong_t idx;
> > > +        xen_pfn_t gpfn;
> > > +        struct xen_add_to_physmap xatp;
> > > +
> > > +        if ( copy_from_guest_offset(&idx, xatpr->idxs,
> > > xatpr->size-1, 1)  ||
> > > +             copy_from_guest_offset(&gpfn, xatpr->gpfns,
> > > xatpr->size-1, 1) )
> > > +            return -EFAULT;
> > > +
> > > +        xatp.space = xatpr->space;
> > > +        xatp.idx = idx;
> > > +        xatp.gpfn = gpfn;
> > > +        rc = xenmem_add_to_physmap_once(d, &xatp);
> > > +
> > > +        if ( copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc,
> > > 1) )
> > > +            return -EFAULT;
> > > +
> > > +        xatpr->size--;
> > > +
> > > +        /* Check for continuation if it's not the last iteration
> > > */
> > > +        if ( xatpr->size > 0 && hypercall_preempt_check() )
> > > +            return -EAGAIN;
> > > +    }
> > > +
> > > +    return 0;
> > > +}
> > 
> > Now that I started looking into creating the compat wrapper for this,
> > I realize that processing this request backwards is wrong: The effect
> > of the entire hypercall can end up being different between forward
> > and reverse processing, if an index or gpfn happens to be twice in
> > the set. While that's perhaps not the usual thing to do, you never
> > know how a creative user of the interface may find it useful to do so.
> 
> IIRC, I had gotten that from the arm guys.... it seems plausible to say
> the mappings are unpredictable in case of repeated idx/gpfn, but I'll
> see what they are doing to fix, or just change it myself here.

I sent the fix a few minutes ago. You and Jan were CCd.

Ian.

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-18  7:55             ` Jan Beulich
@ 2013-12-18 10:07               ` Ian Campbell
  2013-12-18 10:34                 ` Jan Beulich
  0 siblings, 1 reply; 52+ messages in thread
From: Ian Campbell @ 2013-12-18 10:07 UTC (permalink / raw)
  To: Jan Beulich; +Cc: george.dunlap, xen-devel, Keir Fraser, tim

On Wed, 2013-12-18 at 07:55 +0000, Jan Beulich wrote:
> >>> On 17.12.13 at 16:11, "Jan Beulich" <JBeulich@suse.com> wrote:
> >>>> On 17.12.13 at 15:40, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> >> On Tue, 2013-12-17 at 14:36 +0000, Jan Beulich wrote:
> >>> >>> On 17.12.13 at 14:59, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> >>> > We could change the code but we could also tighten the interface
> >>> > requirements, either by explicit specifying that the range is handled in
> >>> > reverse order or by mandating that index/gpfn must not be repeated
> >>> > (whether or not we actively try and detect such cases).
> >>> 
> >>> Specifying that this gets processed backwards would be, well,
> >>> backwards. Requiring no duplicates (or else getting undefined
> >>> behavior) would be possible. But processing the operation in the
> >>> conventional order doesn't seem all that hard.
> >> 
> >> The reason I thought it would be tricky was finding somewhere to stash
> >> the progress over the continuation. Do you have a cunning plan?
> > 
> > Just like we do in other cases - in the struct that was passed to
> > us by the caller (incrementing the handles and decrementing the
> > count as needed).
> 
> And I was wrong with this - there's no proper precedent of us
> modifying hypercall interface structures except where certain
> fields are specified to be outputs.
> 
> For XENMEM_add_to_physmap_range none of the fields is, so
> even copying back the size field (like currently done on ARM,
> and like also done for XENMEM_add_to_physmap's
> XENMAPSPACE_gmfn_range sub-case) isn't really correct.
> Instead the general method for encoding the continuation
> point in mem-ops is to put the resume index in the high bits of
> the first hypercall argument.
> 
> Whether we want to change the specification here (clarifying
> that all of the structure may be modified by the hypervisor in
> the course of executing the hypercall) instead of fixing the
> implementation is open for discussion.

Isn't x86's xenmem_add_to_physmap precedent here? It modifies idx, gpfn
and size which are not specified as outputs (more by omission than being
explicitly inputs, but the implication of the comments is that they are
inputs).

I'm happy to change the spec here. I think in general very few guests
are going to be relying on the guest handle after the hypercall (they
have the original array in their hand) and in this specific case I know
that neither the x86 nor ARM implementation on Linux do so, and those
are the only existing dom0s which use this h/call right now I think.

The alternative is to use MEMOP_EXTENT_SHIFT/MEMOP_CMD_MASK? I'd rather
avoid that if we don't have to...

Ian.

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-18 10:07               ` Ian Campbell
@ 2013-12-18 10:34                 ` Jan Beulich
  2013-12-18 10:41                   ` Ian Campbell
  0 siblings, 1 reply; 52+ messages in thread
From: Jan Beulich @ 2013-12-18 10:34 UTC (permalink / raw)
  To: Ian Campbell; +Cc: george.dunlap, xen-devel, Keir Fraser, tim

>>> On 18.12.13 at 11:07, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> On Wed, 2013-12-18 at 07:55 +0000, Jan Beulich wrote:
>> >>> On 17.12.13 at 16:11, "Jan Beulich" <JBeulich@suse.com> wrote:
>> >>>> On 17.12.13 at 15:40, Ian Campbell <Ian.Campbell@citrix.com> wrote:
>> >> On Tue, 2013-12-17 at 14:36 +0000, Jan Beulich wrote:
>> >>> >>> On 17.12.13 at 14:59, Ian Campbell <Ian.Campbell@citrix.com> wrote:
>> >>> > We could change the code but we could also tighten the interface
>> >>> > requirements, either by explicit specifying that the range is handled in
>> >>> > reverse order or by mandating that index/gpfn must not be repeated
>> >>> > (whether or not we actively try and detect such cases).
>> >>> 
>> >>> Specifying that this gets processed backwards would be, well,
>> >>> backwards. Requiring no duplicates (or else getting undefined
>> >>> behavior) would be possible. But processing the operation in the
>> >>> conventional order doesn't seem all that hard.
>> >> 
>> >> The reason I thought it would be tricky was finding somewhere to stash
>> >> the progress over the continuation. Do you have a cunning plan?
>> > 
>> > Just like we do in other cases - in the struct that was passed to
>> > us by the caller (incrementing the handles and decrementing the
>> > count as needed).
>> 
>> And I was wrong with this - there's no proper precedent of us
>> modifying hypercall interface structures except where certain
>> fields are specified to be outputs.
>> 
>> For XENMEM_add_to_physmap_range none of the fields is, so
>> even copying back the size field (like currently done on ARM,
>> and like also done for XENMEM_add_to_physmap's
>> XENMAPSPACE_gmfn_range sub-case) isn't really correct.
>> Instead the general method for encoding the continuation
>> point in mem-ops is to put the resume index in the high bits of
>> the first hypercall argument.
>> 
>> Whether we want to change the specification here (clarifying
>> that all of the structure may be modified by the hypervisor in
>> the course of executing the hypercall) instead of fixing the
>> implementation is open for discussion.
> 
> Isn't x86's xenmem_add_to_physmap precedent here? It modifies idx, gpfn
> and size which are not specified as outputs (more by omission than being
> explicitly inputs, but the implication of the comments is that they are
> inputs).

Right, as I said above. Yet that went in without anyone noticing
the inconsistent behavior, and hence I'd call it a bug.

> I'm happy to change the spec here. I think in general very few guests
> are going to be relying on the guest handle after the hypercall (they
> have the original array in their hand) and in this specific case I know
> that neither the x86 nor ARM implementation on Linux do so, and those
> are the only existing dom0s which use this h/call right now I think.

If a kernel chose to use static (say per-CPU) arrays and a static
interface structure here, it might easily set the handles just once...

> The alternative is to use MEMOP_EXTENT_SHIFT/MEMOP_CMD_MASK? I'd rather
> avoid that if we don't have to...

Admittedly it's not the nicest model, but that's how other memops
work. Question really is whether we want to allow the inconsistency
here, or generally allow the modification of documented to be input
only interface structure fields.

Jan

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-18 10:34                 ` Jan Beulich
@ 2013-12-18 10:41                   ` Ian Campbell
  2013-12-18 10:55                     ` Jan Beulich
  0 siblings, 1 reply; 52+ messages in thread
From: Ian Campbell @ 2013-12-18 10:41 UTC (permalink / raw)
  To: Jan Beulich; +Cc: george.dunlap, xen-devel, Keir Fraser, tim

On Wed, 2013-12-18 at 10:34 +0000, Jan Beulich wrote:
> >>> On 18.12.13 at 11:07, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> > On Wed, 2013-12-18 at 07:55 +0000, Jan Beulich wrote:
> >> >>> On 17.12.13 at 16:11, "Jan Beulich" <JBeulich@suse.com> wrote:
> >> >>>> On 17.12.13 at 15:40, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> >> >> On Tue, 2013-12-17 at 14:36 +0000, Jan Beulich wrote:
> >> >>> >>> On 17.12.13 at 14:59, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> >> >>> > We could change the code but we could also tighten the interface
> >> >>> > requirements, either by explicit specifying that the range is handled in
> >> >>> > reverse order or by mandating that index/gpfn must not be repeated
> >> >>> > (whether or not we actively try and detect such cases).
> >> >>> 
> >> >>> Specifying that this gets processed backwards would be, well,
> >> >>> backwards. Requiring no duplicates (or else getting undefined
> >> >>> behavior) would be possible. But processing the operation in the
> >> >>> conventional order doesn't seem all that hard.
> >> >> 
> >> >> The reason I thought it would be tricky was finding somewhere to stash
> >> >> the progress over the continuation. Do you have a cunning plan?
> >> > 
> >> > Just like we do in other cases - in the struct that was passed to
> >> > us by the caller (incrementing the handles and decrementing the
> >> > count as needed).
> >> 
> >> And I was wrong with this - there's no proper precedent of us
> >> modifying hypercall interface structures except where certain
> >> fields are specified to be outputs.
> >> 
> >> For XENMEM_add_to_physmap_range none of the fields is, so
> >> even copying back the size field (like currently done on ARM,
> >> and like also done for XENMEM_add_to_physmap's
> >> XENMAPSPACE_gmfn_range sub-case) isn't really correct.
> >> Instead the general method for encoding the continuation
> >> point in mem-ops is to put the resume index in the high bits of
> >> the first hypercall argument.
> >> 
> >> Whether we want to change the specification here (clarifying
> >> that all of the structure may be modified by the hypervisor in
> >> the course of executing the hypercall) instead of fixing the
> >> implementation is open for discussion.
> > 
> > Isn't x86's xenmem_add_to_physmap precedent here? It modifies idx, gpfn
> > and size which are not specified as outputs (more by omission than being
> > explicitly inputs, but the implication of the comments is that they are
> > inputs).
> 
> Right, as I said above. Yet that went in without anyone noticing
> the inconsistent behavior, and hence I'd call it a bug.
> 
> > I'm happy to change the spec here. I think in general very few guests
> > are going to be relying on the guest handle after the hypercall (they
> > have the original array in their hand) and in this specific case I know
> > that neither the x86 nor ARM implementation on Linux do so, and those
> > are the only existing dom0s which use this h/call right now I think.
> 
> If a kernel chose to use static (say per-CPU) arrays and a static
> interface structure here, it might easily set the handles just once...

In theory, but at least in practice today it can't, because that would
fault. You are right that it is inconsistent though.

> > The alternative is to use MEMOP_EXTENT_SHIFT/MEMOP_CMD_MASK? I'd rather
> > avoid that if we don't have to...
> 
> Admittedly it's not the nicest model, but that's how other memops
> work. Question really is whether we want to allow the inconsistency
> here, or generally allow the modification of documented to be input
> only interface structure fields.

If you want to fix x86 then I don't mind making arm follow suit. (If you
wanted to do the arm change at the same time I would be very happy of
course, it should be mechanically pretty similar to the x86 change).

We could also consider taking my fix to count forward immediately, to
remove one inconsistency, and remove the other issue afterwards, since
coming to rely on backwards processing is a harder hole to get out of
than going from modifying the struct to not doing so.

Ian.

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86
  2013-12-18 10:41                   ` Ian Campbell
@ 2013-12-18 10:55                     ` Jan Beulich
  0 siblings, 0 replies; 52+ messages in thread
From: Jan Beulich @ 2013-12-18 10:55 UTC (permalink / raw)
  To: Ian Campbell; +Cc: george.dunlap, xen-devel, Keir Fraser, tim

>>> On 18.12.13 at 11:41, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> On Wed, 2013-12-18 at 10:34 +0000, Jan Beulich wrote:
>> >>> On 18.12.13 at 11:07, Ian Campbell <Ian.Campbell@citrix.com> wrote:
>> > The alternative is to use MEMOP_EXTENT_SHIFT/MEMOP_CMD_MASK? I'd rather
>> > avoid that if we don't have to...
>> 
>> Admittedly it's not the nicest model, but that's how other memops
>> work. Question really is whether we want to allow the inconsistency
>> here, or generally allow the modification of documented to be input
>> only interface structure fields.
> 
> If you want to fix x86 then I don't mind making arm follow suit. (If you
> wanted to do the arm change at the same time I would be very happy of
> course, it should be mechanically pretty similar to the x86 change).

I'll see how ugly it ends up being.

> We could also consider taking my fix to count forward immediately, to
> remove one inconsistency and remove the other issue afterwards, since
> coming to rely on backwards processing is a harder hole to get out of
> than going from modifying the strict to not.

Yes, we should certainly do that as a first step.

Jan

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2013-12-17  2:38 ` [V7 PATCH 5/7] pvh: change xsm_add_to_physmap Mukesh Rathor
  2013-12-17  8:32   ` Jan Beulich
@ 2013-12-19 15:50   ` Daniel De Graaf
  2013-12-19 19:55     ` Mukesh Rathor
  2014-01-28  1:55   ` Mukesh Rathor
  2014-02-12 16:47   ` Julien Grall
  3 siblings, 1 reply; 52+ messages in thread
From: Daniel De Graaf @ 2013-12-19 15:50 UTC (permalink / raw)
  To: Mukesh Rathor, Xen-devel; +Cc: george.dunlap, keir.xen, tim, JBeulich

On 12/16/2013 09:38 PM, Mukesh Rathor wrote:
> In preparation for the next patch, we update xsm_add_to_physmap to
> allow for checking of foreign domain. Thus, the current domain must
> have the right to update the mappings of target domain with pages from
> foreign domain.
>
> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>

Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>

For some reason I only have v3 and cannot find the v4-v7 patches in
my Inbox, although I do have the copies that came via the xen-devel list.
I recall having some transient email issues during that time, so if
Oracle's mail servers have a more aggressive retry policy the messages
could have been lost due to that; I wasn't intending to ignore your patches.

> ---
>   xen/arch/arm/mm.c       |    4 ++--
>   xen/arch/x86/mm.c       |   18 +++++++++++++++---
>   xen/include/xsm/dummy.h |   10 ++++++++--
>   xen/include/xsm/xsm.h   |    6 +++---
>   xen/xsm/flask/hooks.c   |    9 +++++++--
>   5 files changed, 35 insertions(+), 12 deletions(-)
>
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 654281a..3515526 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -1134,7 +1134,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
>           if ( d == NULL )
>               return -ESRCH;
>
> -        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
> +        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL);
>           if ( rc )
>           {
>               rcu_unlock_domain(d);
> @@ -1165,7 +1165,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
>           if ( d == NULL )
>               return -ESRCH;
>
> -        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
> +        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL);
>           if ( rc )
>           {
>               rcu_unlock_domain(d);
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index ae332f4..0cae437 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -4730,7 +4730,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
>           if ( d == NULL )
>               return -ESRCH;
>
> -        if ( xsm_add_to_physmap(XSM_TARGET, current->domain, d) )
> +        if ( xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL) )
>           {
>               rcu_unlock_domain(d);
>               return -EPERM;
> @@ -4756,7 +4756,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
>       case XENMEM_add_to_physmap_range:
>       {
>           struct xen_add_to_physmap_range xatpr;
> -        struct domain *d;
> +        struct domain *d, *fd = NULL;
>
>           if ( copy_from_guest(&xatpr, arg, 1) )
>               return -EFAULT;
> @@ -4769,10 +4769,22 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
>           if ( d == NULL )
>               return -ESRCH;
>
> -        if ( (rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d)) == 0 )
> +        if ( xatpr.space == XENMAPSPACE_gmfn_foreign )
> +        {
> +            fd = get_pg_owner(xatpr.foreign_domid);
> +            if ( fd == NULL )
> +            {
> +                rcu_unlock_domain(d);
> +                return -ESRCH;
> +            }
> +        }
> +        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
> +        if ( rc == 0 )
>               rc = xenmem_add_to_physmap_range(d, &xatpr);
>
>           rcu_unlock_domain(d);
> +        if ( fd )
> +            put_pg_owner(fd);
>
>           if ( rc == -EAGAIN )
>               rc = hypercall_create_continuation(
> diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
> index eb9e1a1..1228e52 100644
> --- a/xen/include/xsm/dummy.h
> +++ b/xen/include/xsm/dummy.h
> @@ -467,10 +467,16 @@ static XSM_INLINE int xsm_pci_config_permission(XSM_DEFAULT_ARG struct domain *d
>       return xsm_default_action(action, current->domain, d);
>   }
>
> -static XSM_INLINE int xsm_add_to_physmap(XSM_DEFAULT_ARG struct domain *d1, struct domain *d2)
> +static XSM_INLINE int xsm_add_to_physmap(XSM_DEFAULT_ARG struct domain *d, struct domain *t, struct domain *f)
>   {
> +    int rc;
> +
>       XSM_ASSERT_ACTION(XSM_TARGET);
> -    return xsm_default_action(action, d1, d2);
> +    rc = xsm_default_action(action, d, t);
> +    if ( f && !rc )
> +        rc = xsm_default_action(action, d, f);
> +
> +    return rc;
>   }
>
>   static XSM_INLINE int xsm_remove_from_physmap(XSM_DEFAULT_ARG struct domain *d1, struct domain *d2)
> diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
> index 1939453..9ee9543 100644
> --- a/xen/include/xsm/xsm.h
> +++ b/xen/include/xsm/xsm.h
> @@ -90,7 +90,7 @@ struct xsm_operations {
>       int (*memory_adjust_reservation) (struct domain *d1, struct domain *d2);
>       int (*memory_stat_reservation) (struct domain *d1, struct domain *d2);
>       int (*memory_pin_page) (struct domain *d1, struct domain *d2, struct page_info *page);
> -    int (*add_to_physmap) (struct domain *d1, struct domain *d2);
> +    int (*add_to_physmap) (struct domain *d, struct domain *t, struct domain *f);
>       int (*remove_from_physmap) (struct domain *d1, struct domain *d2);
>       int (*claim_pages) (struct domain *d);
>
> @@ -344,9 +344,9 @@ static inline int xsm_memory_pin_page(xsm_default_t def, struct domain *d1, stru
>       return xsm_ops->memory_pin_page(d1, d2, page);
>   }
>
> -static inline int xsm_add_to_physmap(xsm_default_t def, struct domain *d1, struct domain *d2)
> +static inline int xsm_add_to_physmap(xsm_default_t def, struct domain *d, struct domain *t, struct domain *f)
>   {
> -    return xsm_ops->add_to_physmap(d1, d2);
> +    return xsm_ops->add_to_physmap(d, t, f);
>   }
>
>   static inline int xsm_remove_from_physmap(xsm_default_t def, struct domain *d1, struct domain *d2)
> diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
> index 7cdef04..81294b1 100644
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -1068,9 +1068,14 @@ static inline int flask_tmem_control(void)
>       return domain_has_xen(current->domain, XEN__TMEM_CONTROL);
>   }
>
> -static int flask_add_to_physmap(struct domain *d1, struct domain *d2)
> +static int flask_add_to_physmap(struct domain *d, struct domain *t, struct domain *f)
>   {
> -    return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PHYSMAP);
> +    int rc;
> +
> +    rc = domain_has_perm(d, t, SECCLASS_MMU, MMU__PHYSMAP);
> +    if ( f && !rc )
> +        rc = domain_has_perm(d, f, SECCLASS_MMU, MMU__MAP_READ|MMU__MAP_WRITE);
> +    return rc;
>   }
>
>   static int flask_remove_from_physmap(struct domain *d1, struct domain *d2)
>

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2013-12-19 15:50   ` Daniel De Graaf
@ 2013-12-19 19:55     ` Mukesh Rathor
  0 siblings, 0 replies; 52+ messages in thread
From: Mukesh Rathor @ 2013-12-19 19:55 UTC (permalink / raw)
  To: Daniel De Graaf; +Cc: george.dunlap, Xen-devel, tim, keir.xen, JBeulich

On Thu, 19 Dec 2013 10:50:45 -0500
Daniel De Graaf <dgdegra@tycho.nsa.gov> wrote:

> On 12/16/2013 09:38 PM, Mukesh Rathor wrote:
> > In preparation for the next patch, we update xsm_add_to_physmap to
> > allow for checking of foreign domain. Thus, the current domain must
> > have the right to update the mappings of target domain with pages
> > from foreign domain.
> >
> > Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
> 
> Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
> 
> For some reason I only have v3 and cannot find the v4-v7 patches in
> my Inbox, although I do have the copies that came via the xen-devel
> list. I recall having some transient email issues during that time,
> so if Oracle's mail servers have a more aggressive retry policy the
> messages could have been lost due to that; I wasn't intending to
> ignore your patches.

Hi Daniel,

No worries, not sure what happened, I had cc'd you on the individual
patch in v4-v6. Anyways, glad to have your ack, and thanks again.

Mukesh

> 
> > ---
> >   xen/arch/arm/mm.c       |    4 ++--
> >   xen/arch/x86/mm.c       |   18 +++++++++++++++---
> >   xen/include/xsm/dummy.h |   10 ++++++++--
> >   xen/include/xsm/xsm.h   |    6 +++---
> >   xen/xsm/flask/hooks.c   |    9 +++++++--
> >   5 files changed, 35 insertions(+), 12 deletions(-)
> >
> > diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> > index 654281a..3515526 100644
> > --- a/xen/arch/arm/mm.c
> > +++ b/xen/arch/arm/mm.c
> > @@ -1134,7 +1134,7 @@ long arch_memory_op(int op,
> > XEN_GUEST_HANDLE_PARAM(void) arg) if ( d == NULL )
> >               return -ESRCH;
> >
> > -        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
> > +        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d,
> > NULL); if ( rc )
> >           {
> >               rcu_unlock_domain(d);
> > @@ -1165,7 +1165,7 @@ long arch_memory_op(int op,
> > XEN_GUEST_HANDLE_PARAM(void) arg) if ( d == NULL )
> >               return -ESRCH;
> >
> > -        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
> > +        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d,
> > NULL); if ( rc )
> >           {
> >               rcu_unlock_domain(d);
> > diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> > index ae332f4..0cae437 100644
> > --- a/xen/arch/x86/mm.c
> > +++ b/xen/arch/x86/mm.c
> > @@ -4730,7 +4730,7 @@ long arch_memory_op(int op,
> > XEN_GUEST_HANDLE_PARAM(void) arg) if ( d == NULL )
> >               return -ESRCH;
> >
> > -        if ( xsm_add_to_physmap(XSM_TARGET, current->domain, d) )
> > +        if ( xsm_add_to_physmap(XSM_TARGET, current->domain, d,
> > NULL) ) {
> >               rcu_unlock_domain(d);
> >               return -EPERM;
> > @@ -4756,7 +4756,7 @@ long arch_memory_op(int op,
> > XEN_GUEST_HANDLE_PARAM(void) arg) case XENMEM_add_to_physmap_range:
> >       {
> >           struct xen_add_to_physmap_range xatpr;
> > -        struct domain *d;
> > +        struct domain *d, *fd = NULL;
> >
> >           if ( copy_from_guest(&xatpr, arg, 1) )
> >               return -EFAULT;
> > @@ -4769,10 +4769,22 @@ long arch_memory_op(int op,
> > XEN_GUEST_HANDLE_PARAM(void) arg) if ( d == NULL )
> >               return -ESRCH;
> >
> > -        if ( (rc = xsm_add_to_physmap(XSM_TARGET, current->domain,
> > d)) == 0 )
> > +        if ( xatpr.space == XENMAPSPACE_gmfn_foreign )
> > +        {
> > +            fd = get_pg_owner(xatpr.foreign_domid);
> > +            if ( fd == NULL )
> > +            {
> > +                rcu_unlock_domain(d);
> > +                return -ESRCH;
> > +            }
> > +        }
> > +        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d,
> > fd);
> > +        if ( rc == 0 )
> >               rc = xenmem_add_to_physmap_range(d, &xatpr);
> >
> >           rcu_unlock_domain(d);
> > +        if ( fd )
> > +            put_pg_owner(fd);
> >
> >           if ( rc == -EAGAIN )
> >               rc = hypercall_create_continuation(
> > diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
> > index eb9e1a1..1228e52 100644
> > --- a/xen/include/xsm/dummy.h
> > +++ b/xen/include/xsm/dummy.h
> > @@ -467,10 +467,16 @@ static XSM_INLINE int
> > xsm_pci_config_permission(XSM_DEFAULT_ARG struct domain *d return
> > xsm_default_action(action, current->domain, d); }
> >
> > -static XSM_INLINE int xsm_add_to_physmap(XSM_DEFAULT_ARG struct
> > domain *d1, struct domain *d2) +static XSM_INLINE int
> > xsm_add_to_physmap(XSM_DEFAULT_ARG struct domain *d, struct domain
> > *t, struct domain *f) {
> > +    int rc;
> > +
> >       XSM_ASSERT_ACTION(XSM_TARGET);
> > -    return xsm_default_action(action, d1, d2);
> > +    rc = xsm_default_action(action, d, t);
> > +    if ( f && !rc )
> > +        rc = xsm_default_action(action, d, f);
> > +
> > +    return rc;
> >   }
> >
> >   static XSM_INLINE int xsm_remove_from_physmap(XSM_DEFAULT_ARG
> > struct domain *d1, struct domain *d2) diff --git
> > a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h index
> > 1939453..9ee9543 100644 --- a/xen/include/xsm/xsm.h
> > +++ b/xen/include/xsm/xsm.h
> > @@ -90,7 +90,7 @@ struct xsm_operations {
> >       int (*memory_adjust_reservation) (struct domain *d1, struct
> > domain *d2); int (*memory_stat_reservation) (struct domain *d1,
> > struct domain *d2); int (*memory_pin_page) (struct domain *d1,
> > struct domain *d2, struct page_info *page);
> > -    int (*add_to_physmap) (struct domain *d1, struct domain *d2);
> > +    int (*add_to_physmap) (struct domain *d, struct domain *t,
> > struct domain *f); int (*remove_from_physmap) (struct domain *d1,
> > struct domain *d2); int (*claim_pages) (struct domain *d);
> >
> > @@ -344,9 +344,9 @@ static inline int
> > xsm_memory_pin_page(xsm_default_t def, struct domain *d1, stru
> > return xsm_ops->memory_pin_page(d1, d2, page); }
> >
> > -static inline int xsm_add_to_physmap(xsm_default_t def, struct
> > domain *d1, struct domain *d2) +static inline int
> > xsm_add_to_physmap(xsm_default_t def, struct domain *d, struct
> > domain *t, struct domain *f) {
> > -    return xsm_ops->add_to_physmap(d1, d2);
> > +    return xsm_ops->add_to_physmap(d, t, f);
> >   }
> >
> >   static inline int xsm_remove_from_physmap(xsm_default_t def,
> > struct domain *d1, struct domain *d2) diff --git
> > a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c index
> > 7cdef04..81294b1 100644 --- a/xen/xsm/flask/hooks.c
> > +++ b/xen/xsm/flask/hooks.c
> > @@ -1068,9 +1068,14 @@ static inline int flask_tmem_control(void)
> >       return domain_has_xen(current->domain, XEN__TMEM_CONTROL);
> >   }
> >
> > -static int flask_add_to_physmap(struct domain *d1, struct domain
> > *d2) +static int flask_add_to_physmap(struct domain *d, struct
> > domain *t, struct domain *f) {
> > -    return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PHYSMAP);
> > +    int rc;
> > +
> > +    rc = domain_has_perm(d, t, SECCLASS_MMU, MMU__PHYSMAP);
> > +    if ( f && !rc )
> > +        rc = domain_has_perm(d, f, SECCLASS_MMU,
> > MMU__MAP_READ|MMU__MAP_WRITE);
> > +    return rc;
> >   }
> >
> >   static int flask_remove_from_physmap(struct domain *d1, struct
> > domain *d2)
> >

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2013-12-17  2:38 ` [V7 PATCH 5/7] pvh: change xsm_add_to_physmap Mukesh Rathor
  2013-12-17  8:32   ` Jan Beulich
  2013-12-19 15:50   ` Daniel De Graaf
@ 2014-01-28  1:55   ` Mukesh Rathor
  2014-01-28 10:31     ` Jan Beulich
  2014-02-12 16:47   ` Julien Grall
  3 siblings, 1 reply; 52+ messages in thread
From: Mukesh Rathor @ 2014-01-28  1:55 UTC (permalink / raw)
  To: Mukesh Rathor; +Cc: Xen-devel, JBeulich


Jan,

Because of your changes to XENMEM_add_to_physmap_batch, I had to rework
the non-xsm changes in this patch. Please take a look and lmk if you are 
OK with the following.

Thanks
Mukesh

--------

Subject: [PATCH] pvh: xsm changes for add to physmap

In preparation for the next patch, we update xsm_add_to_physmap to
allow for checking of the foreign domain. Thus, the current domain must
have the right to update the mappings of the target domain with pages from
the foreign domain.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
---
 xen/arch/x86/mm.c        |    4 ++--
 xen/common/memory.c      |   20 +++++++++++++++++---
 xen/include/asm-x86/mm.h |    3 +++
 xen/include/xsm/dummy.h  |   10 ++++++++--
 xen/include/xsm/xsm.h    |    6 +++---
 xen/xsm/flask/hooks.c    |    9 +++++++--
 6 files changed, 40 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 172c68c..467976c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2794,7 +2794,7 @@ int new_guest_cr3(unsigned long mfn)
     return rc;
 }
 
-static struct domain *get_pg_owner(domid_t domid)
+struct domain *get_pg_owner(domid_t domid)
 {
     struct domain *pg_owner = NULL, *curr = current->domain;
 
@@ -2837,7 +2837,7 @@ static struct domain *get_pg_owner(domid_t domid)
     return pg_owner;
 }
 
-static void put_pg_owner(struct domain *pg_owner)
+void put_pg_owner(struct domain *pg_owner)
 {
     rcu_unlock_domain(pg_owner);
 }
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 5a0efd5..aff0ebd 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -838,7 +838,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
+        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL);
         if ( rc )
         {
             rcu_unlock_domain(d);
@@ -860,7 +860,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     case XENMEM_add_to_physmap_batch:
     {
         struct xen_add_to_physmap_batch xatpb;
-        struct domain *d;
+        struct domain *d, *fd = NULL;
 
         BUILD_BUG_ON((typeof(xatpb.size))-1 >
                      (UINT_MAX >> MEMOP_EXTENT_SHIFT));
@@ -883,7 +883,17 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             return -ESRCH;
 
-        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
+#ifdef CONFIG_X86
+        if ( xatpb.space == XENMAPSPACE_gmfn_foreign )
+        {
+            if ( (fd = get_pg_owner(xatpb.foreign_domid)) == NULL )
+            {
+                rcu_unlock_domain(d);
+                return -ESRCH;
+            }
+        }
+#endif
+        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
         if ( rc )
         {
             rcu_unlock_domain(d);
@@ -893,6 +903,10 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         rc = xenmem_add_to_physmap_batch(d, &xatpb, start_extent);
 
         rcu_unlock_domain(d);
+#ifdef CONFIG_X86
+        if ( fd )
+            put_pg_owner(fd);
+#endif
 
         if ( rc > 0 )
             rc = hypercall_create_continuation(
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index c835f76..b5f8faa 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -610,4 +610,7 @@ typedef struct mm_rwlock {
     const char        *locker_function; /* func that took it */
 } mm_rwlock_t;
 
+struct domain *get_pg_owner(domid_t domid);
+void put_pg_owner(struct domain *pg_owner);
+
 #endif /* __ASM_X86_MM_H__ */
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index eb9e1a1..1228e52 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -467,10 +467,16 @@ static XSM_INLINE int xsm_pci_config_permission(XSM_DEFAULT_ARG struct domain *d
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_add_to_physmap(XSM_DEFAULT_ARG struct domain *d1, struct domain *d2)
+static XSM_INLINE int xsm_add_to_physmap(XSM_DEFAULT_ARG struct domain *d, struct domain *t, struct domain *f)
 {
+    int rc;
+
     XSM_ASSERT_ACTION(XSM_TARGET);
-    return xsm_default_action(action, d1, d2);
+    rc = xsm_default_action(action, d, t);
+    if ( f && !rc )
+        rc = xsm_default_action(action, d, f);
+
+    return rc;
 }
 
 static XSM_INLINE int xsm_remove_from_physmap(XSM_DEFAULT_ARG struct domain *d1, struct domain *d2)
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 1939453..9ee9543 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -90,7 +90,7 @@ struct xsm_operations {
     int (*memory_adjust_reservation) (struct domain *d1, struct domain *d2);
     int (*memory_stat_reservation) (struct domain *d1, struct domain *d2);
     int (*memory_pin_page) (struct domain *d1, struct domain *d2, struct page_info *page);
-    int (*add_to_physmap) (struct domain *d1, struct domain *d2);
+    int (*add_to_physmap) (struct domain *d, struct domain *t, struct domain *f);
     int (*remove_from_physmap) (struct domain *d1, struct domain *d2);
     int (*claim_pages) (struct domain *d);
 
@@ -344,9 +344,9 @@ static inline int xsm_memory_pin_page(xsm_default_t def, struct domain *d1, stru
     return xsm_ops->memory_pin_page(d1, d2, page);
 }
 
-static inline int xsm_add_to_physmap(xsm_default_t def, struct domain *d1, struct domain *d2)
+static inline int xsm_add_to_physmap(xsm_default_t def, struct domain *d, struct domain *t, struct domain *f)
 {
-    return xsm_ops->add_to_physmap(d1, d2);
+    return xsm_ops->add_to_physmap(d, t, f);
 }
 
 static inline int xsm_remove_from_physmap(xsm_default_t def, struct domain *d1, struct domain *d2)
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 7cdef04..81294b1 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1068,9 +1068,14 @@ static inline int flask_tmem_control(void)
     return domain_has_xen(current->domain, XEN__TMEM_CONTROL);
 }
 
-static int flask_add_to_physmap(struct domain *d1, struct domain *d2)
+static int flask_add_to_physmap(struct domain *d, struct domain *t, struct domain *f)
 {
-    return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PHYSMAP);
+    int rc;
+
+    rc = domain_has_perm(d, t, SECCLASS_MMU, MMU__PHYSMAP);
+    if ( f && !rc )
+        rc = domain_has_perm(d, f, SECCLASS_MMU, MMU__MAP_READ|MMU__MAP_WRITE);
+    return rc;
 }
 
 static int flask_remove_from_physmap(struct domain *d1, struct domain *d2)
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-01-28  1:55   ` Mukesh Rathor
@ 2014-01-28 10:31     ` Jan Beulich
  2014-01-29  2:08       ` Mukesh Rathor
  0 siblings, 1 reply; 52+ messages in thread
From: Jan Beulich @ 2014-01-28 10:31 UTC (permalink / raw)
  To: Mukesh Rathor; +Cc: xen-devel

>>> On 28.01.14 at 02:55, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -838,7 +838,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>          if ( d == NULL )
>              return -ESRCH;
>  
> -        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
> +        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, NULL);
>          if ( rc )
>          {
>              rcu_unlock_domain(d);
> @@ -860,7 +860,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>      case XENMEM_add_to_physmap_batch:
>      {
>          struct xen_add_to_physmap_batch xatpb;
> -        struct domain *d;
> +        struct domain *d, *fd = NULL;
>  
>          BUILD_BUG_ON((typeof(xatpb.size))-1 >
>                       (UINT_MAX >> MEMOP_EXTENT_SHIFT));
> @@ -883,7 +883,17 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>          if ( d == NULL )
>              return -ESRCH;
>  
> -        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
> +#ifdef CONFIG_X86
> +        if ( xatpb.space == XENMAPSPACE_gmfn_foreign )
> +        {
> +            if ( (fd = get_pg_owner(xatpb.foreign_domid)) == NULL )
> +            {
> +                rcu_unlock_domain(d);
> +                return -ESRCH;
> +            }
> +        }
> +#endif
> +        rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
>          if ( rc )
>          {
>              rcu_unlock_domain(d);
> @@ -893,6 +903,10 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>          rc = xenmem_add_to_physmap_batch(d, &xatpb, start_extent);
>  
>          rcu_unlock_domain(d);
> +#ifdef CONFIG_X86
> +        if ( fd )
> +            put_pg_owner(fd);
> +#endif
>  
>          if ( rc > 0 )
>              rc = hypercall_create_continuation(

The only thing x86-specific here is that {get,put}_pg_owner() may
not exist on ARM. But the general operation isn't x86-specific, so
there shouldn't be any CONFIG_X86 dependency here. Instead
you ought to work out with the ARM maintainers whether to stub
out those two functions, or whether the functionality is useful
there too (and hence proper implementations would be needed).

In the latter case I would then also wonder whether the x86
implementation shouldn't be moved into common code.

Jan

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-01-28 10:31     ` Jan Beulich
@ 2014-01-29  2:08       ` Mukesh Rathor
  2014-01-29 10:40         ` Ian Campbell
  0 siblings, 1 reply; 52+ messages in thread
From: Mukesh Rathor @ 2014-01-29  2:08 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Ian Campbell, stefano.stabellini

On Tue, 28 Jan 2014 10:31:36 +0000
"Jan Beulich" <JBeulich@suse.com> wrote:

> >>> On 28.01.14 at 02:55, Mukesh Rathor <mukesh.rathor@oracle.com>
> >>> wrote:
> > --- a/xen/common/memory.c
> > +++ b/xen/common/memory.c
....
> The only think x86-specific here is that {get,put}_pg_owner() may
> not exist on ARM. But the general operation isn't x86-specific, so
> there shouldn't be any CONFIG_X86 dependency here. Instead
> you ought to work out with the ARM maintainers whether to stub
> out those two functions, or whether the functionality is useful
> there too (and hence proper implementations would be needed).
> 
> In the latter case I would then also wonder whether the x86
> implementation shouldn't be moved into common code.

Stefano/Ian:

If you have use for get_pg_owner() I can stub it out for now and
have it return 1, as NULL would result in an error. Otherwise, I can
change the function prototype to return rc with ARM always returning 
0 and not doing anything, like:

        if ( xatpb.space == XENMAPSPACE_gmfn_foreign )
        {
            if ( (rc = get_pg_owner(xatpb.foreign_domid, &fd)) )
            {
                rcu_unlock_domain(d);
                return rc;
            }
        }

which on ARM would always return 0, setting fd to NULL.
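
For illustration, a minimal ARM stub under that scheme might look like
this (hypothetical sketch, not a tested patch):

    /* Hypothetical ARM stub for the rc-returning prototype sketched
     * above: nothing to look up on ARM in this scheme, so always
     * succeed and hand back fd == NULL. */
    int get_pg_owner(domid_t domid, struct domain **pg_owner)
    {
        *pg_owner = NULL;
        return 0;
    }

    void put_pg_owner(struct domain *pg_owner)
    {
        /* Nothing to release in this sketch. */
    }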

If you think it would be needed on ARM, I can just leave the function
prototype the same and you guys can implement it whenever, as I don't have
the insight into ARM; and if it looks the same as x86 you can commonise it too.

thanks
Mukesh

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-01-29  2:08       ` Mukesh Rathor
@ 2014-01-29 10:40         ` Ian Campbell
  2014-01-29 11:38           ` Tim Deegan
  0 siblings, 1 reply; 52+ messages in thread
From: Ian Campbell @ 2014-01-29 10:40 UTC (permalink / raw)
  To: Mukesh Rathor; +Cc: xen-devel, Jan Beulich, stefano.stabellini

On Tue, 2014-01-28 at 18:08 -0800, Mukesh Rathor wrote:
> On Tue, 28 Jan 2014 10:31:36 +0000
> "Jan Beulich" <JBeulich@suse.com> wrote:
> 
> > >>> On 28.01.14 at 02:55, Mukesh Rathor <mukesh.rathor@oracle.com>
> > >>> wrote:
> > > --- a/xen/common/memory.c
> > > +++ b/xen/common/memory.c
> ....
> > The only think x86-specific here is that {get,put}_pg_owner() may
> > not exist on ARM. But the general operation isn't x86-specific, so
> > there shouldn't be any CONFIG_X86 dependency here. Instead
> > you ought to work out with the ARM maintainers whether to stub
> > out those two functions, or whether the functionality is useful
> > there too (and hence proper implementations would be needed).
> > 
> > In the latter case I would then also wonder whether the x86
> > implementation shouldn't be moved into common code.
> 
> Stefano/Ian:
> 
> If you have use for get_pg_owner() I can stub it out for now and
> have it return 1, as NULL would result in error. Otherwise, I can
> change the function prototype to return rc with ARM always returning 
> 0 and not doing anything, like:
> 
>         if ( xatpb.space == XENMAPSPACE_gmfn_foreign )
>         {
>             if ( (rc = get_pg_owner(xatpb.foreign_domid, &fd)) )
>             {
>                 rcu_unlock_domain(d);
>                 return rc;
>             }
>         }
> 
> which on ARM would always return 0, setting fd to NULL.
> 
> If you think it would be needed in ARM, I can just leave the function
> prototype the same and you guys can implement whenever as I don't have the
> insight into ARM, and if it looks the same as x86 you can commonise it too.

Yes, please just make get/put_pg_owner common.

The only required change would be to:
    if ( unlikely(paging_mode_translate(curr)) )
    {
        MEM_LOG("Cannot mix foreign mappings with translated domains");
        goto out;
    }

which is not needed for ARM, and I suspect needs adjusting for PVH too
(ah, there it is in the next patch). I think the best solution there
would be a new predicate e.g. paging_mode_supports_foreign(curr) (or
some better name, I don't especially like my suggestion).

on ARM:

#define paging_mode_supports_foreign(d) (1)

on x86:

#define paging_mode_supports_foreign(d) (is_pvh_domain(d) || !paging_mode_translate(d))
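
With such a predicate, the PV-only check quoted above would become
something like (sketch only; the log message wording is mine):

    if ( unlikely(!paging_mode_supports_foreign(curr)) )
    {
        MEM_LOG("Foreign mappings not supported for this paging mode");
        goto out;
    }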

Thanks,
Ian.

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-01-29 10:40         ` Ian Campbell
@ 2014-01-29 11:38           ` Tim Deegan
  2014-01-29 11:41             ` Ian Campbell
  0 siblings, 1 reply; 52+ messages in thread
From: Tim Deegan @ 2014-01-29 11:38 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel, Jan Beulich, stefano.stabellini

At 10:40 +0000 on 29 Jan (1390988426), Ian Campbell wrote:
> On Tue, 2014-01-28 at 18:08 -0800, Mukesh Rathor wrote:
> > On Tue, 28 Jan 2014 10:31:36 +0000
> > "Jan Beulich" <JBeulich@suse.com> wrote:
> > > The only think x86-specific here is that {get,put}_pg_owner() may
> > > not exist on ARM. But the general operation isn't x86-specific, so
> > > there shouldn't be any CONFIG_X86 dependency here. Instead
> > > you ought to work out with the ARM maintainers whether to stub
> > > out those two functions, or whether the functionality is useful
> > > there too (and hence proper implementations would be needed).
[...]
> Yes, please just make get/put_pg_owner common.
> 
> The only required change would be to:
>     if ( unlikely(paging_mode_translate(curr)) )
>     {
>         MEM_LOG("Cannot mix foreign mappings with translated domains");
>         goto out;
>     }
> 
> which is not needed for ARM, and I suspect needs adjusting for PVH too
> (ah, there it is in the next patch). I think the best solution there
> would be a new predicate e.g. paging_mode_supports_foreign(curr) (or
> some better name, I don't especially like my suggestion)
> 
> on ARM:
> 
> #define paging_mode_supports_foreign(d) (1)
> 
> on x86:
> 
> #define paging_mode_support_foreign(d) (is_pvh_domain(curr) || !(paging_mode_translate(curr))
> 

Hmmm.  That's likely to have unintended consequences somewhere.  (And
if that check is really not needed for PVH maybe it's not needed for
HVM either, given that they share all their paging support code).

But I don't think we need to tinker with it anyway - AFAICS,
get_pg_owner() isn't really what's wanted in the XATP code.  All the
other uses of get_pg_owner() are in the x86 PV MMU code, which this is
definitely not, and it handles cases (like mmio) that we don't want
here anyway.  How about using rcu_lock_live_remote_domain_by_id()?
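
In the XENMEM_add_to_physmap_batch handler that might look roughly like
this (sketch, error handling abbreviated):

    struct domain *fd = NULL;

    if ( xatpb.space == XENMAPSPACE_gmfn_foreign )
    {
        rc = rcu_lock_live_remote_domain_by_id(xatpb.foreign_domid, &fd);
        if ( rc )
        {
            rcu_unlock_domain(d);
            return rc;
        }
    }
    rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d, fd);
    /* ... as in the patch above, then drop the references ... */
    if ( fd )
        rcu_unlock_domain(fd);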

Tim.

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-01-29 11:38           ` Tim Deegan
@ 2014-01-29 11:41             ` Ian Campbell
  2014-01-29 11:48               ` Tim Deegan
  0 siblings, 1 reply; 52+ messages in thread
From: Ian Campbell @ 2014-01-29 11:41 UTC (permalink / raw)
  To: Tim Deegan; +Cc: xen-devel, Jan Beulich, stefano.stabellini

On Wed, 2014-01-29 at 12:38 +0100, Tim Deegan wrote:
> At 10:40 +0000 on 29 Jan (1390988426), Ian Campbell wrote:
> > On Tue, 2014-01-28 at 18:08 -0800, Mukesh Rathor wrote:
> > > On Tue, 28 Jan 2014 10:31:36 +0000
> > > "Jan Beulich" <JBeulich@suse.com> wrote:
> > > > The only think x86-specific here is that {get,put}_pg_owner() may
> > > > not exist on ARM. But the general operation isn't x86-specific, so
> > > > there shouldn't be any CONFIG_X86 dependency here. Instead
> > > > you ought to work out with the ARM maintainers whether to stub
> > > > out those two functions, or whether the functionality is useful
> > > > there too (and hence proper implementations would be needed).
> [...]
> > Yes, please just make get/put_pg_owner common.
> > 
> > The only required change would be to:
> >     if ( unlikely(paging_mode_translate(curr)) )
> >     {
> >         MEM_LOG("Cannot mix foreign mappings with translated domains");
> >         goto out;
> >     }
> > 
> > which is not needed for ARM, and I suspect needs adjusting for PVH too
> > (ah, there it is in the next patch). I think the best solution there
> > would be a new predicate e.g. paging_mode_supports_foreign(curr) (or
> > some better name, I don't especially like my suggestion)
> > 
> > on ARM:
> > 
> > #define paging_mode_supports_foreign(d) (1)
> > 
> > on x86:
> > 
> > #define paging_mode_support_foreign(d) (is_pvh_domain(curr) || !(paging_mode_translate(curr))
> > 
> 
> Hmmm.  That's likely to have unintended consequences somewhere.  (And
> if that check is really not needed for PVH maybe it's not needed for
> HVM either, given that they share all their paging support code).
> 
> But I don't think we need to tinker with it anyway - AFAICS,
> get_pg_owner() isn't really what's wanted in the XATP code.  All the
> other uses of get_pg_owner() are in the x86 PV MMU code, which this is
> definitely not, and it handles cases (like mmio) that we don't want
> here anyway.  How about using rcu_lock_live_remote_domain_by_id()?

We have a struct page in our hand -- don't we need to look up the owner
and lock it somewhat atomically?

Ian.

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-01-29 11:41             ` Ian Campbell
@ 2014-01-29 11:48               ` Tim Deegan
  2014-01-29 11:51                 ` Ian Campbell
  0 siblings, 1 reply; 52+ messages in thread
From: Tim Deegan @ 2014-01-29 11:48 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel, Jan Beulich, stefano.stabellini

At 11:41 +0000 on 29 Jan (1390992062), Ian Campbell wrote:
> On Wed, 2014-01-29 at 12:38 +0100, Tim Deegan wrote:
> > At 10:40 +0000 on 29 Jan (1390988426), Ian Campbell wrote:
> > > On Tue, 2014-01-28 at 18:08 -0800, Mukesh Rathor wrote:
> > > > On Tue, 28 Jan 2014 10:31:36 +0000
> > > > "Jan Beulich" <JBeulich@suse.com> wrote:
> > > > > The only think x86-specific here is that {get,put}_pg_owner() may
> > > > > not exist on ARM. But the general operation isn't x86-specific, so
> > > > > there shouldn't be any CONFIG_X86 dependency here. Instead
> > > > > you ought to work out with the ARM maintainers whether to stub
> > > > > out those two functions, or whether the functionality is useful
> > > > > there too (and hence proper implementations would be needed).
> > [...]
> > > Yes, please just make get/put_pg_owner common.
> > > 
> > > The only required change would be to:
> > >     if ( unlikely(paging_mode_translate(curr)) )
> > >     {
> > >         MEM_LOG("Cannot mix foreign mappings with translated domains");
> > >         goto out;
> > >     }
> > > 
> > > which is not needed for ARM, and I suspect needs adjusting for PVH too
> > > (ah, there it is in the next patch). I think the best solution there
> > > would be a new predicate e.g. paging_mode_supports_foreign(curr) (or
> > > some better name, I don't especially like my suggestion)
> > > 
> > > on ARM:
> > > 
> > > #define paging_mode_supports_foreign(d) (1)
> > > 
> > > on x86:
> > > 
> > > #define paging_mode_support_foreign(d) (is_pvh_domain(curr) || !(paging_mode_translate(curr))
> > > 
> > 
> > Hmmm.  That's likely to have unintended consequences somewhere.  (And
> > if that check is really not needed for PVH maybe it's not needed for
> > HVM either, given that they share all their paging support code).
> > 
> > But I don't think we need to tinker with it anyway - AFAICS,
> > get_pg_owner() isn't really what's wanted in the XATP code.  All the
> > other uses of get_pg_owner() are in the x86 PV MMU code, which this is
> > definitely not, and it handles cases (like mmio) that we don't want
> > here anyway.  How about using rcu_lock_live_remote_domain_by_id()?
> 
> We have a struct page in our hand -- don't we need to lookup the owner
> and lock it somewhat atomically?

I'm not sure what you mean: 
 - the code that Mukesh is adding doesn't have a struct page, it's
   just grabbing the foreign domid from the hypercall arg;
 - if we did have a struct page, we'd just need to take a ref to 
   stop the owner changing underfoot; and
 - get_pg_owner() takes a domid anyway.
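
For the second point, the usual idiom elsewhere in Xen is roughly this
sketch:

    /* Taking a general reference pins the page's ownership: the owner
     * cannot change while we hold the ref. */
    struct domain *owner = page_get_owner_and_reference(page);

    if ( owner != NULL )
    {
        /* ... safe to use page/owner here ... */
        put_page(page);
    }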

Tim.

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-01-29 11:48               ` Tim Deegan
@ 2014-01-29 11:51                 ` Ian Campbell
  2014-01-30  1:33                   ` Mukesh Rathor
  0 siblings, 1 reply; 52+ messages in thread
From: Ian Campbell @ 2014-01-29 11:51 UTC (permalink / raw)
  To: Tim Deegan; +Cc: xen-devel, Jan Beulich, stefano.stabellini

On Wed, 2014-01-29 at 12:48 +0100, Tim Deegan wrote:
> At 11:41 +0000 on 29 Jan (1390992062), Ian Campbell wrote:
> > On Wed, 2014-01-29 at 12:38 +0100, Tim Deegan wrote:
> > > At 10:40 +0000 on 29 Jan (1390988426), Ian Campbell wrote:
> > > > On Tue, 2014-01-28 at 18:08 -0800, Mukesh Rathor wrote:
> > > > > On Tue, 28 Jan 2014 10:31:36 +0000
> > > > > "Jan Beulich" <JBeulich@suse.com> wrote:
> > > > > > The only think x86-specific here is that {get,put}_pg_owner() may
> > > > > > not exist on ARM. But the general operation isn't x86-specific, so
> > > > > > there shouldn't be any CONFIG_X86 dependency here. Instead
> > > > > > you ought to work out with the ARM maintainers whether to stub
> > > > > > out those two functions, or whether the functionality is useful
> > > > > > there too (and hence proper implementations would be needed).
> > > [...]
> > > > Yes, please just make get/put_pg_owner common.
> > > > 
> > > > The only required change would be to:
> > > >     if ( unlikely(paging_mode_translate(curr)) )
> > > >     {
> > > >         MEM_LOG("Cannot mix foreign mappings with translated domains");
> > > >         goto out;
> > > >     }
> > > > 
> > > > which is not needed for ARM, and I suspect needs adjusting for PVH too
> > > > (ah, there it is in the next patch). I think the best solution there
> > > > would be a new predicate e.g. paging_mode_supports_foreign(curr) (or
> > > > some better name, I don't especially like my suggestion)
> > > > 
> > > > on ARM:
> > > > 
> > > > #define paging_mode_supports_foreign(d) (1)
> > > > 
> > > > on x86:
> > > > 
> > > > #define paging_mode_support_foreign(d) (is_pvh_domain(curr) || !(paging_mode_translate(curr))
> > > > 
> > > 
> > > Hmmm.  That's likely to have unintended consequences somewhere.  (And
> > > if that check is really not needed for PVH maybe it's not needed for
> > > HVM either, given that they share all their paging support code).
> > > 
> > > But I don't think we need to tinker with it anyway - AFAICS,
> > > get_pg_owner() isn't really what's wanted in the XATP code.  All the
> > > other uses of get_pg_owner() are in the x86 PV MMU code, which this is
> > > definitely not, and it handles cases (like mmio) that we don't want
> > > here anyway.  How about using rcu_lock_live_remote_domain_by_id()?
> > 
> > We have a struct page in our hand -- don't we need to lookup the owner
> > and lock it somewhat atomically?
> 
> I'm not sure what you mean: 
>  - the code that Mukesh is adding doesn't have a struct page, it's
>    just grabbing the foreign domid from the hypercall arg;
>  - if we did have a struct page, we'd just need to take a ref to 
>    stop the owner changing underfoot; and
>  - get_pg_owner() takes a domid anyway.

Sorry, I was confused/misled by the name...

rcu_lock_live_remote_domain_by_id does look like what is needed.

Ian.

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-01-29 11:51                 ` Ian Campbell
@ 2014-01-30  1:33                   ` Mukesh Rathor
  2014-02-09 16:51                     ` Julien Grall
  0 siblings, 1 reply; 52+ messages in thread
From: Mukesh Rathor @ 2014-01-30  1:33 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel, Tim Deegan, Jan Beulich, stefano.stabellini

On Wed, 29 Jan 2014 11:51:35 +0000
Ian Campbell <Ian.Campbell@citrix.com> wrote:

> On Wed, 2014-01-29 at 12:48 +0100, Tim Deegan wrote:
> > At 11:41 +0000 on 29 Jan (1390992062), Ian Campbell wrote:
> > > On Wed, 2014-01-29 at 12:38 +0100, Tim Deegan wrote:
> > > > At 10:40 +0000 on 29 Jan (1390988426), Ian Campbell wrote:
> > > > > On Tue, 2014-01-28 at 18:08 -0800, Mukesh Rathor wrote:
> > > > > > On Tue, 28 Jan 2014 10:31:36 +0000
> > > > > > "Jan Beulich" <JBeulich@suse.com> wrote:
> > > > > > > The only think x86-specific here is that
> > > > > > > {get,put}_pg_owner() may not exist on ARM. But the
> > > > > > > general operation isn't x86-specific, so there shouldn't
> > > > > > > be any CONFIG_X86 dependency here. Instead you ought to
> > > > > > > work out with the ARM maintainers whether to stub out
> > > > > > > those two functions, or whether the functionality is
> > > > > > > useful there too (and hence proper implementations would
> > > > > > > be needed).
> > > > [...]
> > > > > Yes, please just make get/put_pg_owner common.
> > > > > 
> > > > > The only required change would be to:
> > > > >     if ( unlikely(paging_mode_translate(curr)) )
> > > > >     {
> > > > >         MEM_LOG("Cannot mix foreign mappings with translated
> > > > > domains"); goto out;
> > > > >     }
> > > > > 
> > > > > which is not needed for ARM, and I suspect needs adjusting
> > > > > for PVH too (ah, there it is in the next patch). I think the
> > > > > best solution there would be a new predicate e.g.
> > > > > paging_mode_supports_foreign(curr) (or some better name, I
> > > > > don't especially like my suggestion)
> > > > > 
> > > > > on ARM:
> > > > > 
> > > > > #define paging_mode_supports_foreign(d) (1)
> > > > > 
> > > > > on x86:
> > > > > 
> > > > > #define paging_mode_support_foreign(d) (is_pvh_domain(curr)
> > > > > || !(paging_mode_translate(curr))
> > > > > 
> > > > 
> > > > Hmmm.  That's likely to have unintended consequences
> > > > somewhere.  (And if that check is really not needed for PVH
> > > > maybe it's not needed for HVM either, given that they share all
> > > > their paging support code).
> > > > 
> > > > But I don't think we need to tinker with it anyway - AFAICS,
> > > > get_pg_owner() isn't really what's wanted in the XATP code.
> > > > All the other uses of get_pg_owner() are in the x86 PV MMU
> > > > code, which this is definitely not, and it handles cases (like
> > > > mmio) that we don't want here anyway.  How about using
> > > > rcu_lock_live_remote_domain_by_id()?
> > > 
> > > We have a struct page in our hand -- don't we need to lookup the
> > > owner and lock it somewhat atomically?
> > 
> > I'm not sure what you mean: 
> >  - the code that Mukesh is adding doesn't have a struct page, it's
> >    just grabbing the foreign domid from the hypercall arg;
> >  - if we did have a struct page, we'd just need to take a ref to 
> >    stop the owner changing underfoot; and
> >  - get_pg_owner() takes a domid anyway.
> 
> Sorry, I was confused/mislead by the name...
> 
> rcu_lock_live_remote_domain_by_id does look like what is needed.

Yup, that will do it. Thanks Tim.

Mukesh

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-01-30  1:33                   ` Mukesh Rathor
@ 2014-02-09 16:51                     ` Julien Grall
  2014-02-10 13:42                       ` Ian Campbell
  0 siblings, 1 reply; 52+ messages in thread
From: Julien Grall @ 2014-02-09 16:51 UTC (permalink / raw)
  To: Mukesh Rathor, Ian Campbell
  Cc: xen-devel, Tim Deegan, Jan Beulich, stefano.stabellini

Hello Mukesh,

On 30/01/14 01:33, Mukesh Rathor wrote:
>>> I'm not sure what you mean:
>>>   - the code that Mukesh is adding doesn't have a struct page, it's
>>>     just grabbing the foreign domid from the hypercall arg;
>>>   - if we did have a struct page, we'd just need to take a ref to
>>>     stop the owner changing underfoot; and
>>>   - get_pg_owner() takes a domid anyway.
>>
>> Sorry, I was confused/mislead by the name...
>>
>> rcu_lock_live_remote_domain_by_id does look like what is needed.

Following the xentrace thread
(http://www.gossamer-threads.com/lists/xen/devel/315883),
rcu_lock_live_remote_domain_by_id will not work correctly here.

On Xen on ARM, xentrace uses this hypercall to map Xen pages (via
DOMID_XEN). In that case, rcu_lock_*domain* will always fail, which will
prevent xentrace from working on Xen on ARM (and on PVH).

Regards,

-- 
Julien Grall

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-02-09 16:51                     ` Julien Grall
@ 2014-02-10 13:42                       ` Ian Campbell
  2014-02-10 15:16                         ` Julien Grall
  0 siblings, 1 reply; 52+ messages in thread
From: Ian Campbell @ 2014-02-10 13:42 UTC (permalink / raw)
  To: Julien Grall; +Cc: xen-devel, Tim Deegan, Jan Beulich, stefano.stabellini

On Sun, 2014-02-09 at 16:51 +0000, Julien Grall wrote:
> Hello Mukesh,
> 
> On 30/01/14 01:33, Mukesh Rathor wrote:
> >>> I'm not sure what you mean:
> >>>   - the code that Mukesh is adding doesn't have a struct page, it's
> >>>     just grabbing the foreign domid from the hypercall arg;
> >>>   - if we did have a struct page, we'd just need to take a ref to
> >>>     stop the owner changing underfoot; and
> >>>   - get_pg_owner() takes a domid anyway.
> >>
> >> Sorry, I was confused/mislead by the name...
> >>
> >> rcu_lock_live_remote_domain_by_id does look like what is needed.
> 
> Following the xentrace tread: 
> http://www.gossamer-threads.com/lists/xen/devel/315883, 
> rcu_lock_live_remote_domain_by_id will not correctly works.
> 
> On Xen on ARM, xentrace is using this hypercall to map XEN page (via 
> DOMID_XEN). In this case, rcu_lock_*domain* will always fails which will 
> prevent xentrace to works on Xen on ARM (and on PVH).

I'm not sure how that extends to add_to_physmap though -- doing add to
physmap of a DOMID_XEN-owned page through the “back door” in this way
isn't supposed to work.

Ian.

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-02-10 13:42                       ` Ian Campbell
@ 2014-02-10 15:16                         ` Julien Grall
  2014-02-10 15:27                           ` Ian Campbell
  0 siblings, 1 reply; 52+ messages in thread
From: Julien Grall @ 2014-02-10 15:16 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel, Tim Deegan, Jan Beulich, stefano.stabellini

Hi Ian,

On 02/10/2014 01:42 PM, Ian Campbell wrote:
> On Sun, 2014-02-09 at 16:51 +0000, Julien Grall wrote:
>> Hello Mukesh,
>>
>> On 30/01/14 01:33, Mukesh Rathor wrote:
>>>>> I'm not sure what you mean:
>>>>>   - the code that Mukesh is adding doesn't have a struct page, it's
>>>>>     just grabbing the foreign domid from the hypercall arg;
>>>>>   - if we did have a struct page, we'd just need to take a ref to
>>>>>     stop the owner changing underfoot; and
>>>>>   - get_pg_owner() takes a domid anyway.
>>>>
>>>> Sorry, I was confused/mislead by the name...
>>>>
>>>> rcu_lock_live_remote_domain_by_id does look like what is needed.
>>
>> Following the xentrace tread: 
>> http://www.gossamer-threads.com/lists/xen/devel/315883, 
>> rcu_lock_live_remote_domain_by_id will not correctly works.
>>
>> On Xen on ARM, xentrace is using this hypercall to map XEN page (via 
>> DOMID_XEN). In this case, rcu_lock_*domain* will always fails which will 
>> prevent xentrace to works on Xen on ARM (and on PVH).
> 
> I'm not sure how that extends to add_to_physmap though -- doing add to
> physmap of a DOMID_XEN owned page through the "back door" in this way
> isn't supposed to work.

Currently xentrace uses xc_map_foreign_page to map the trace buffer
(with DOMID_XEN as the domid argument).

AFAIK, on an x86 PV domain this call results in a
HYPERVISOR_mmu_update, which allows mapping Xen pages into a privileged
domain (with the dummy XSM policy).

For ARM, a call to xc_map_foreign_page ends up in
XENMEM_add_to_physmap_range(XENMAPSPACE_gmfn_foreign).

For both architectures, you can look at the function
xen_remap_domain_mfn_range (implemented differently on ARM and x86),
which is the last function called before entering the hypervisor.

If we don't modify the XENMEM_add_to_physmap hypercall, we will have to
add a new way to map Xen pages for xentrace & co.
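
For reference, the xentrace mapping in question is roughly of this
shape (sketch from memory, not the verbatim tools/xentrace source; the
variable names are illustrative):

    /* Map Xen-owned trace pages: DOMID_XEN is passed where a real
     * domid would normally go. */
    tbufs = xc_map_foreign_range(xc_handle, DOMID_XEN, size,
                                 PROT_READ | PROT_WRITE, tbufs_mfn);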

-- 
Julien Grall

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-02-10 15:16                         ` Julien Grall
@ 2014-02-10 15:27                           ` Ian Campbell
  2014-02-10 15:33                             ` Julien Grall
  0 siblings, 1 reply; 52+ messages in thread
From: Ian Campbell @ 2014-02-10 15:27 UTC (permalink / raw)
  To: Julien Grall; +Cc: xen-devel, Tim Deegan, Jan Beulich, stefano.stabellini

On Mon, 2014-02-10 at 15:16 +0000, Julien Grall wrote:
> Hi Ian,
> 
> On 02/10/2014 01:42 PM, Ian Campbell wrote:
> > On Sun, 2014-02-09 at 16:51 +0000, Julien Grall wrote:
> >> Hello Mukesh,
> >>
> >> On 30/01/14 01:33, Mukesh Rathor wrote:
> >>>>> I'm not sure what you mean:
> >>>>>   - the code that Mukesh is adding doesn't have a struct page, it's
> >>>>>     just grabbing the foreign domid from the hypercall arg;
> >>>>>   - if we did have a struct page, we'd just need to take a ref to
> >>>>>     stop the owner changing underfoot; and
> >>>>>   - get_pg_owner() takes a domid anyway.
> >>>>
> >>>> Sorry, I was confused/mislead by the name...
> >>>>
> >>>> rcu_lock_live_remote_domain_by_id does look like what is needed.
> >>
> >> Following the xentrace tread: 
> >> http://www.gossamer-threads.com/lists/xen/devel/315883, 
> >> rcu_lock_live_remote_domain_by_id will not correctly works.
> >>
> >> On Xen on ARM, xentrace is using this hypercall to map XEN page (via 
> >> DOMID_XEN). In this case, rcu_lock_*domain* will always fails which will 
> >> prevent xentrace to works on Xen on ARM (and on PVH).
> > 
> > I'm not sure how that extends to add_to_physmap though -- doing add to
> > physmap of a DOMID_XEN owned page through the "back door" in this way
> > isn't supposed to work.
> 
> Currently xentrace is using xc_map_foreign_page to map the trace buffer
> (with DOMID_XEN in argument).

Sorry, I misunderstood: I thought you were suggesting that rcu_lock_...
didn't work for xentrace and so couldn't work for add_to_physmap either
-- but actually xentrace ends up using add_to_physmap itself.

> AFAIK, on x86 PV domain, this called is resulting by an
> HYPERVISOR_mmu_update which allow do map xen page on priviliged domain
> (with the dummy XSM policy).
> 
> For ARM, a call to xc_map_foreign_page will end up to
> XENMEM_add_to_physmap_range(XENMAPSPACE_gmfn_foreign).
> 
> For both architecture, you can look at the function
> xen_remap_map_domain_mfn_range (implemented differently on ARM and x86)
> which is the last function called before going to the hypervisor.
> 
> If we don't modify the hypercall XENMEM_add_to_physmap, we will have a
> add a new way to map Xen page for xentrace & co.

Wouldn't it be incorrect to generically return OK for mapping a
DOMID_XEN-owned page? At least something needs to validate that the
particular mfn being mapped is supposed to be shared with the guest in
question.

TBH, it doesn't seem that this mechanism for sharing xenpages with
guests is a good fit for PVH or HVM guests/dom0. Perhaps a specific
mapspace would be more appropriate?
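
Purely as illustration (hypothetical name and value, nothing that has
been proposed as an actual patch), such a mapspace would be a one-line
addition to the public interface:

    /* xen/include/public/memory.h -- hypothetical */
    #define XENMAPSPACE_gmfn_xenshare 5 /* GMFN of a page Xen has
                                           shared with this guest
                                           (DOMID_XEN-owned)       */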

Ian

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-02-10 15:27                           ` Ian Campbell
@ 2014-02-10 15:33                             ` Julien Grall
  2014-02-10 15:37                               ` Ian Campbell
  2014-02-20  2:37                               ` Mukesh Rathor
  0 siblings, 2 replies; 52+ messages in thread
From: Julien Grall @ 2014-02-10 15:33 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel, Tim Deegan, Jan Beulich, stefano.stabellini

On 02/10/2014 03:27 PM, Ian Campbell wrote:
> On Mon, 2014-02-10 at 15:16 +0000, Julien Grall wrote:
>> Hi Ian,
>>
>> On 02/10/2014 01:42 PM, Ian Campbell wrote:
>>> On Sun, 2014-02-09 at 16:51 +0000, Julien Grall wrote:
>>>> Hello Mukesh,
>>>>
>>>> On 30/01/14 01:33, Mukesh Rathor wrote:
>>>>>>> I'm not sure what you mean:
>>>>>>>   - the code that Mukesh is adding doesn't have a struct page, it's
>>>>>>>     just grabbing the foreign domid from the hypercall arg;
>>>>>>>   - if we did have a struct page, we'd just need to take a ref to
>>>>>>>     stop the owner changing underfoot; and
>>>>>>>   - get_pg_owner() takes a domid anyway.
>>>>>>
>>>>>> Sorry, I was confused/mislead by the name...
>>>>>>
>>>>>> rcu_lock_live_remote_domain_by_id does look like what is needed.
>>>>
>>>> Following the xentrace tread: 
>>>> http://www.gossamer-threads.com/lists/xen/devel/315883, 
>>>> rcu_lock_live_remote_domain_by_id will not correctly works.
>>>>
>>>> On Xen on ARM, xentrace is using this hypercall to map XEN page (via 
>>>> DOMID_XEN). In this case, rcu_lock_*domain* will always fails which will 
>>>> prevent xentrace to works on Xen on ARM (and on PVH).
>>>
>>> I'm not sure how that extends to add_to_physmap though -- doing add to
>>> physmap of a DOMID_XEN owned page through the "back door" in this way
>>> isn't supposed to work.
>>
>> Currently xentrace is using xc_map_foreign_page to map the trace buffer
>> (with DOMID_XEN in argument).
> 
> Sorry, I misunderstood, I thought you were suggesting that rcu_lock_...
> didn't work for xentrace and so couldn't work for add_to_physmap either
> -- but actually xentrace ends up using add_to_physmap itself.
> 
>> AFAIK, on x86 PV domain, this called is resulting by an
>> HYPERVISOR_mmu_update which allow do map xen page on priviliged domain
>> (with the dummy XSM policy).
>>
>> For ARM, a call to xc_map_foreign_page will end up to
>> XENMEM_add_to_physmap_range(XENMAPSPACE_gmfn_foreign).
>>
>> For both architecture, you can look at the function
>> xen_remap_map_domain_mfn_range (implemented differently on ARM and x86)
>> which is the last function called before going to the hypervisor.
>>
>> If we don't modify the hypercall XENMEM_add_to_physmap, we will have a
>> add a new way to map Xen page for xentrace & co.
> 
> Wouldn't it be incorrect to generically return OK for mapping a
> DOMID_XEN owned page -- at least something needs to validate that the
> particular mfn being mapped is supposed to be shared with the guest in
> question.

That's already the case. By default a Xen heap page doesn't belong to
DOMID_XEN; Xen has to explicitly call
share_xen_page_with_privileged_guests (see an example in
xen/common/trace.c:244) to assign DOMID_XEN as the owner of the given
page.
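
The trace.c pattern being referred to looks roughly like this (sketch,
not the verbatim source):

    /* Hand each trace-buffer page to DOMID_XEN so that privileged
     * guests are allowed to map it. */
    for ( i = 0; i < nr_pages; i++ )
        share_xen_page_with_privileged_guests(
            virt_to_page(rawbuf + i * PAGE_SIZE), XENSHARE_writable);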

> 
> TBH, it doesn't seem that this mechanism for sharing xenpages with
> guests is a good fit for PVH or HVM guests/dom0. Perhaps a specific
> mapspace would be more appropriate?

Following my explanation just above, I don't think we need to have a
specific mapspace. XSM and DOMID_XEN already correctly protect the
mapping of Xen heap pages.

-- 
Julien Grall

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-02-10 15:33                             ` Julien Grall
@ 2014-02-10 15:37                               ` Ian Campbell
  2014-02-20  2:37                               ` Mukesh Rathor
  1 sibling, 0 replies; 52+ messages in thread
From: Ian Campbell @ 2014-02-10 15:37 UTC (permalink / raw)
  To: Julien Grall; +Cc: xen-devel, Tim Deegan, Jan Beulich, stefano.stabellini

On Mon, 2014-02-10 at 15:33 +0000, Julien Grall wrote:
> On 02/10/2014 03:27 PM, Ian Campbell wrote:
> > Wouldn't it be incorrect to generically return OK for mapping a
> > DOMID_XEN owned page -- at least something needs to validate that the
> > particular mfn being mapped is supposed to be shared with the guest in
> > question.
> 
> It's already the case. By default a xen heap page doesn't belong to
> DOMID_XEN. Xen has to explicitly call share_xen_page_with_privileged
> guess (see an example in xen/common/trace.c:244) to set DOMID_XEN to the
> given page.

Ah, I didn't know this.

> > TBH, it doesn't seem that this mechanism for sharing xenpages with
> > guests is a good fit for PVH or HVM guests/dom0. Perhaps a specific
> > mapspace would be more appropriate?
> 
> Following my explanation just above, I don't think we need to have a
> specific mapspace. XSM and DOMID_XEN will already protect correctly the
> mapping of xen heap page.

Right.

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2013-12-17  2:38 ` [V7 PATCH 5/7] pvh: change xsm_add_to_physmap Mukesh Rathor
                     ` (2 preceding siblings ...)
  2014-01-28  1:55   ` Mukesh Rathor
@ 2014-02-12 16:47   ` Julien Grall
  2014-02-20  2:22     ` Mukesh Rathor
  3 siblings, 1 reply; 52+ messages in thread
From: Julien Grall @ 2014-02-12 16:47 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: Xen-devel, Ian Campbell, george.dunlap, tim, keir.xen,
	Jan Beulich, dgdegra

Hi Mukesh,

On 12/17/2013 02:38 AM, Mukesh Rathor wrote:
> In preparation for the next patch, we update xsm_add_to_physmap to
> allow for checking of foreign domain. Thus, the current domain must
> have the right to update the mappings of target domain with pages from
> foreign domain.
> 
> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>

While I was playing with XSM on ARM, I noticed that Daniel De Graaf
added xsm_map_gmfn_foreign a few months ago (see commit 0b201e6).

Would it be suitable to use this XSM instead of extending
xsm_add_to_physmap?

Regards,

-- 
Julien Grall

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-02-12 16:47   ` Julien Grall
@ 2014-02-20  2:22     ` Mukesh Rathor
  2014-02-20 13:49       ` Julien Grall
  0 siblings, 1 reply; 52+ messages in thread
From: Mukesh Rathor @ 2014-02-20  2:22 UTC (permalink / raw)
  To: Julien Grall
  Cc: Xen-devel, Ian Campbell, george.dunlap, tim, keir.xen,
	Jan Beulich, dgdegra

On Wed, 12 Feb 2014 16:47:54 +0000
Julien Grall <julien.grall@linaro.org> wrote:

> Hi Mukesh,
> 
> On 12/17/2013 02:38 AM, Mukesh Rathor wrote:
> > In preparation for the next patch, we update xsm_add_to_physmap to
> > allow for checking of foreign domain. Thus, the current domain must
> > have the right to update the mappings of target domain with pages
> > from foreign domain.
> > 
> > Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
> 
> While I was playing with XSM on ARM, I have noticed that Daniel De
> Graff has added xsm_map_gfmn_foreign few months ago (see commit
> 0b201e6).
> 
> Would it be suitable to use this XSM instead of extending
> xsm_add_to_physmap?
> 
> Regards,
> 

Not the same thing: add_to_physmap could be adding pages from a foreign
domain to a domain's physmap.

Mukesh

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-02-10 15:33                             ` Julien Grall
  2014-02-10 15:37                               ` Ian Campbell
@ 2014-02-20  2:37                               ` Mukesh Rathor
  2014-02-20  8:31                                 ` Jan Beulich
  1 sibling, 1 reply; 52+ messages in thread
From: Mukesh Rathor @ 2014-02-20  2:37 UTC (permalink / raw)
  To: Julien Grall
  Cc: xen-devel, Tim Deegan, Ian Campbell, Jan Beulich, stefano.stabellini

On Mon, 10 Feb 2014 15:33:18 +0000
Julien Grall <julien.grall@linaro.org> wrote:

> On 02/10/2014 03:27 PM, Ian Campbell wrote:
> > On Mon, 2014-02-10 at 15:16 +0000, Julien Grall wrote:
> >> Hi Ian,
> >>
> >> On 02/10/2014 01:42 PM, Ian Campbell wrote:
> >>> On Sun, 2014-02-09 at 16:51 +0000, Julien Grall wrote:
> >>>> Hello Mukesh,
> >>>>
> >>>> On 30/01/14 01:33, Mukesh Rathor wrote:
> >>>>>>> I'm not sure what you mean:
> >>>>>>>   - the code that Mukesh is adding doesn't have a struct
> >>
> >> For ARM, a call to xc_map_foreign_page will end up to
> >> XENMEM_add_to_physmap_range(XENMAPSPACE_gmfn_foreign).
> >>
> >> For both architecture, you can look at the function
> >> xen_remap_map_domain_mfn_range (implemented differently on ARM and
> >> x86) which is the last function called before going to the
> >> hypervisor.
> >>
> >> If we don't modify the hypercall XENMEM_add_to_physmap, we will
> >> have a add a new way to map Xen page for xentrace & co.
> > 
> > Wouldn't it be incorrect to generically return OK for mapping a
> > DOMID_XEN owned page -- at least something needs to validate that
> > the particular mfn being mapped is supposed to be shared with the
> > guest in question.
> 
> It's already the case. By default a xen heap page doesn't belong to
> DOMID_XEN. Xen has to explicitly call share_xen_page_with_privileged
> guess (see an example in xen/common/trace.c:244) to set DOMID_XEN to
> the given page.


So, how about the following:

+++ b/xen/common/domain.c
@@ -484,8 +484,20 @@ struct domain *rcu_lock_domain_by_any_id(domid_t dom)
 
 int rcu_lock_remote_domain_by_id(domid_t dom, struct domain **d)
 {
-    if ( (*d = rcu_lock_domain_by_id(dom)) == NULL )
+
+    if ( (dom == DOMID_XEN && (*d = rcu_lock_domain(dom_xen)) == NULL) ||
+         (dom != DOMID_XEN && (*d = rcu_lock_domain_by_id(dom)) == NULL) )
+        return -ESRCH;
+


Ack/Nack?

Thanks
Mukesh

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-02-20  2:37                               ` Mukesh Rathor
@ 2014-02-20  8:31                                 ` Jan Beulich
  0 siblings, 0 replies; 52+ messages in thread
From: Jan Beulich @ 2014-02-20  8:31 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: xen-devel, Julien Grall, Tim Deegan, Ian Campbell, stefano.stabellini

>>> On 20.02.14 at 03:37, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> On Mon, 10 Feb 2014 15:33:18 +0000
> Julien Grall <julien.grall@linaro.org> wrote:
> 
>> On 02/10/2014 03:27 PM, Ian Campbell wrote:
>> > On Mon, 2014-02-10 at 15:16 +0000, Julien Grall wrote:
>> >> Hi Ian,
>> >>
>> >> On 02/10/2014 01:42 PM, Ian Campbell wrote:
>> >>> On Sun, 2014-02-09 at 16:51 +0000, Julien Grall wrote:
>> >>>> Hello Mukesh,
>> >>>>
>> >>>> On 30/01/14 01:33, Mukesh Rathor wrote:
>> >>>>>>> I'm not sure what you mean:
>> >>>>>>>   - the code that Mukesh is adding doesn't have a struct
>> >>
>> >> For ARM, a call to xc_map_foreign_page will end up to
>> >> XENMEM_add_to_physmap_range(XENMAPSPACE_gmfn_foreign).
>> >>
>> >> For both architecture, you can look at the function
>> >> xen_remap_map_domain_mfn_range (implemented differently on ARM and
>> >> x86) which is the last function called before going to the
>> >> hypervisor.
>> >>
>> >> If we don't modify the hypercall XENMEM_add_to_physmap, we will
>> >> have a add a new way to map Xen page for xentrace & co.
>> > 
>> > Wouldn't it be incorrect to generically return OK for mapping a
>> > DOMID_XEN owned page -- at least something needs to validate that
>> > the particular mfn being mapped is supposed to be shared with the
>> > guest in question.
>> 
>> It's already the case. By default a xen heap page doesn't belong to
>> DOMID_XEN. Xen has to explicitly call share_xen_page_with_privileged
>> guess (see an example in xen/common/trace.c:244) to set DOMID_XEN to
>> the given page.
> 
> 
> So, how about following:
> 
> +++ b/xen/common/domain.c
> @@ -484,8 +484,20 @@ struct domain *rcu_lock_domain_by_any_id(domid_t dom)
>  
>   int rcu_lock_remote_domain_by_id(domid_t dom, struct domain **d)
>    {
>    -    if ( (*d = rcu_lock_domain_by_id(dom)) == NULL )
>    +
>    +    if ( (dom == DOMID_XEN && (*d = rcu_lock_domain(dom_xen)) == NULL) ||
>    +         (dom != DOMID_XEN && (*d = rcu_lock_domain_by_id(dom)) == NULL) )
>    +        return -ESRCH;
>    +

Changing a very generic function for a very special purpose is
almost never the right thing: I'd suppose that _if_ this is really the
right thing to do in the first place (which I continue to doubt without
being presented with full context), it should be done in the one or
very few call sites that actually need this (depending on their count
a new helper function might then be worthwhile introducing).
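
A call-site helper along those lines might look like this (hypothetical
sketch; the name is invented here):

    static int xatp_lock_foreign(domid_t domid, struct domain **fd)
    {
        if ( domid == DOMID_XEN )
        {
            *fd = rcu_lock_domain(dom_xen);
            return 0;
        }
        return rcu_lock_live_remote_domain_by_id(domid, fd);
    }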

Jan

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-02-20  2:22     ` Mukesh Rathor
@ 2014-02-20 13:49       ` Julien Grall
  2014-02-21  1:22         ` Mukesh Rathor
  0 siblings, 1 reply; 52+ messages in thread
From: Julien Grall @ 2014-02-20 13:49 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: Xen-devel, Ian Campbell, george.dunlap, tim, keir.xen,
	Jan Beulich, dgdegra

On 02/20/2014 02:22 AM, Mukesh Rathor wrote:
> On Wed, 12 Feb 2014 16:47:54 +0000
> Julien Grall <julien.grall@linaro.org> wrote:
> 
>> Hi Mukesh,
>>
>> On 12/17/2013 02:38 AM, Mukesh Rathor wrote:
>>> In preparation for the next patch, we update xsm_add_to_physmap to
>>> allow for checking of the foreign domain. Thus, the current domain
>>> must have the right to update the mappings of the target domain with
>>> pages from a foreign domain.
>>>
>>> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
>>
>> While I was playing with XSM on ARM, I noticed that Daniel De Graaf
>> added xsm_map_gmfn_foreign a few months ago (see commit 0b201e6).
>>
>> Would it be suitable to use this XSM instead of extending
>> xsm_add_to_physmap?
>>
>> Regards,
>>
> 
Not the same thing: an add-to-physmap could be adding pages from a
foreign domain to a domain's physmap.

Let's assume you don't modify xsm_add_to_physmap; in this case:
   - xsm_add_to_physmap checks whether the current domain is allowed to
modify the p2m of a given domain
   - xsm_map_gmfn_foreign checks whether the given domain is allowed to
hold foreign mappings from the foreign domain

The two XSM checks are distinct and should be used together. You don't
care whether the current domain can modify a given domain's p2m in
order to add a foreign mapping; you only want to know whether the given
domain is allowed to hold foreign mappings from a specific foreign
domain.

IMHO, your solution of modifying xsm_add_to_physmap will complicate the
policy, because you need to explicitly say:
   - my domain A is able to modify the P2M of domain B, which is able
to have foreign mappings from domain C
   - my domain D is able to modify the P2M of domain B, which is able
to have foreign mappings from domain C

The last part is redundant.
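
To illustrate, the hypercall handler would then issue the two checks
back to back. A rough sketch only: the hook signatures follow the
xsm_default_t convention, and the local names are illustrative:

/* d  = domain whose p2m is being modified,
 * fd = foreign domain owning the pages being mapped. */
rc = xsm_add_to_physmap(XSM_TARGET, current->domain, d);
if ( rc )
    return rc;

rc = xsm_map_gmfn_foreign(XSM_TARGET, d, fd);
if ( rc )
    return rc;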

Cheers,

-- 
Julien Grall

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-02-20 13:49       ` Julien Grall
@ 2014-02-21  1:22         ` Mukesh Rathor
  2014-02-21 23:53           ` Mukesh Rathor
  0 siblings, 1 reply; 52+ messages in thread
From: Mukesh Rathor @ 2014-02-21  1:22 UTC (permalink / raw)
  To: Julien Grall
  Cc: Xen-devel, Ian Campbell, george.dunlap, tim, keir.xen,
	Jan Beulich, dgdegra

On Thu, 20 Feb 2014 13:49:58 +0000
Julien Grall <julien.grall@linaro.org> wrote:

> On 02/20/2014 02:22 AM, Mukesh Rathor wrote:
> > On Wed, 12 Feb 2014 16:47:54 +0000
> > Julien Grall <julien.grall@linaro.org> wrote:
> > 
> >> Hi Mukesh,
> >>
> >> On 12/17/2013 02:38 AM, Mukesh Rathor wrote:
> >>> In preparation for the next patch, we update xsm_add_to_physmap to
> >>> allow for checking of the foreign domain. Thus, the current domain
> >>> must have the right to update the mappings of the target domain
> >>> with pages from a foreign domain.
> >>>
> >>> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
> >>
> >> While I was playing with XSM on ARM, I noticed that Daniel De
> >> Graaf added xsm_map_gmfn_foreign a few months ago (see commit
> >> 0b201e6).
> >>
> >> Would it be suitable to use this XSM instead of extending
> >> xsm_add_to_physmap?
> >>
> >> Regards,
> >>
> > 
> > Not the same thing: an add-to-physmap could be adding pages from a
> > foreign domain to a domain's physmap.
> 
> Let's assume you don't modify xsm_add_to_physmap; in this case:
>    - xsm_add_to_physmap checks whether the current domain is allowed
> to modify the p2m of a given domain
>    - xsm_map_gmfn_foreign checks whether the given domain is allowed
> to hold foreign mappings from the foreign domain
> 
> The two XSM checks are distinct and should be used together. You don't care that

I see, I thought you meant replacing one with the other. I am not a
security expert, so I just followed the suggestions. But looking at the
code, it looks like the above is the way to go, and I can just drop my
xsm_add_to_physmap change patch (which, btw, doesn't check whether the
target has access to foreign mappings, so is probably not correct).
Thanks for noticing.

Mukesh

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-02-21  1:22         ` Mukesh Rathor
@ 2014-02-21 23:53           ` Mukesh Rathor
  2014-02-22  0:20             ` Julien Grall
  0 siblings, 1 reply; 52+ messages in thread
From: Mukesh Rathor @ 2014-02-21 23:53 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: Xen-devel, Ian Campbell, george.dunlap, Julien Grall, tim,
	keir.xen, Jan Beulich, dgdegra

On Thu, 20 Feb 2014 17:22:34 -0800
Mukesh Rathor <mukesh.rathor@oracle.com> wrote:

> On Thu, 20 Feb 2014 13:49:58 +0000
> Julien Grall <julien.grall@linaro.org> wrote:
> 
> > On 02/20/2014 02:22 AM, Mukesh Rathor wrote:
> > > On Wed, 12 Feb 2014 16:47:54 +0000
> > > Julien Grall <julien.grall@linaro.org> wrote:
> > > 
> > >> Hi Mukesh,
> > >>
> > >> On 12/17/2013 02:38 AM, Mukesh Rathor wrote:
> > >>> In preparation for the next patch, we update xsm_add_to_physmap
> > >>> to allow for checking of the foreign domain. Thus, the current
> > >>> domain must have the right to update the mappings of the target
> > >>> domain with pages from a foreign domain.
> > >>>
> > >>> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
> > >>
> > >> While I was playing with XSM on ARM, I noticed that Daniel
> > >> De Graaf added xsm_map_gmfn_foreign a few months ago (see
> > >> commit 0b201e6).
> > >>
> > >> Would it be suitable to use this XSM instead of extending
> > >> xsm_add_to_physmap?
> > >>
> > >> Regards,
> > >>
> > > 
> > > Not the same thing: an add-to-physmap could be adding pages from
> > > a foreign domain to a domain's physmap.
> > 
> > Let's assume you don't modify xsm_add_to_physmap; in this case:
> >    - xsm_add_to_physmap checks whether the current domain is
> > allowed to modify the p2m of a given domain
> >    - xsm_map_gmfn_foreign checks whether the given domain is
> > allowed to hold foreign mappings from the foreign domain
> > 
> > The two XSM checks are distinct and should be used together. You
> > don't care that
> 
> I see, I thought you meant replacing one with the other. I am not a
> security expert, so I just followed the suggestions. But looking at
> the code, it looks like the above is the way to go, and I can just
> drop my xsm_add_to_physmap change patch (which, btw, doesn't check
> whether the target has access to foreign mappings, so is probably not
> correct). Thanks for noticing.


BTW, in include/xsm/xsm.h, shouldn't 

static inline int xsm_map_gmfn_foreign(struct domain *d, struct domain *t)

be

static inline int xsm_map_gmfn_foreign(xsm_default_t def, struct domain *d, struct domain *t)

Not sure how you were able to compile with XSM enabled on ARM?
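
For reference, the dummy (XSM-disabled) hooks follow this pattern, so
I'd expect the fixed version to look roughly like the below; a sketch
based on the convention in xsm/dummy.h, not the actual fix:

static XSM_INLINE int xsm_map_gmfn_foreign(XSM_DEFAULT_ARG struct domain *d,
                                           struct domain *t)
{
    XSM_ASSERT_ACTION(XSM_TARGET);
    return xsm_default_action(action, d, t);
}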

thanks
Mukesh

^ permalink raw reply	[flat|nested] 52+ messages in thread

* Re: [V7 PATCH 5/7] pvh: change xsm_add_to_physmap
  2014-02-21 23:53           ` Mukesh Rathor
@ 2014-02-22  0:20             ` Julien Grall
  0 siblings, 0 replies; 52+ messages in thread
From: Julien Grall @ 2014-02-22  0:20 UTC (permalink / raw)
  To: Mukesh Rathor
  Cc: Xen-devel, Ian Campbell, george.dunlap, tim, keir.xen,
	Jan Beulich, dgdegra



On 21/02/14 23:53, Mukesh Rathor wrote:
>
> BTW, in include/xsm/xsm.h, shouldn't
>
> static inline int xsm_map_gmfn_foreign(struct domain *d, struct domain *t)
>
> be
>
> static inline int xsm_map_gmfn_foreign(xsm_default_t def, struct domain *d, struct domain *t)
>
> Not sure how you were able to compile with XSM enabled on ARM?

XSM doesn't compile at all on ARM. I'm currently working on a patch
series for that.

One of the patches fixes xsm_map_gmfn_foreign; see:
http://xenbits.xen.org/gitweb/?p=people/julieng/xen-unstable.git;a=commit;h=8414a0e5873d1dbcb3a7d20c45bcb2e683142dc5

If you want, I can send the patch on Monday.

Cheers,

-- 
Julien Grall

^ permalink raw reply	[flat|nested] 52+ messages in thread

end of thread

Thread overview: 52+ messages
2013-12-17  2:38 [V7 PATCH 0/7]: PVH dom0 Mukesh Rathor
2013-12-17  2:38 ` [V7 PATCH 1/7] pvh dom0: move some pv specific code to static functions Mukesh Rathor
2013-12-17  2:38 ` [V7 PATCH 2/7] pvh dom0: construct_dom0 changes Mukesh Rathor
2013-12-17  2:38 ` [V7 PATCH 3/7] pvh dom0: implement XENMEM_add_to_physmap_range for x86 Mukesh Rathor
2013-12-17 13:07   ` Jan Beulich
2013-12-17 13:59     ` Ian Campbell
2013-12-17 14:36       ` Jan Beulich
2013-12-17 14:40         ` Ian Campbell
2013-12-17 15:11           ` Jan Beulich
2013-12-17 15:34             ` Ian Campbell
2013-12-18  7:55             ` Jan Beulich
2013-12-18 10:07               ` Ian Campbell
2013-12-18 10:34                 ` Jan Beulich
2013-12-18 10:41                   ` Ian Campbell
2013-12-18 10:55                     ` Jan Beulich
2013-12-17 23:57     ` Mukesh Rathor
2013-12-18 10:00       ` Ian Campbell
2013-12-17 16:56   ` Jan Beulich
2013-12-17  2:38 ` [V7 PATCH 4/7] pvh dom0: Introduce p2m_map_foreign Mukesh Rathor
2013-12-17  2:38 ` [V7 PATCH 5/7] pvh: change xsm_add_to_physmap Mukesh Rathor
2013-12-17  8:32   ` Jan Beulich
2013-12-18  0:19     ` Mukesh Rathor
2013-12-18  8:07       ` Jan Beulich
2013-12-19 15:50   ` Daniel De Graaf
2013-12-19 19:55     ` Mukesh Rathor
2014-01-28  1:55   ` Mukesh Rathor
2014-01-28 10:31     ` Jan Beulich
2014-01-29  2:08       ` Mukesh Rathor
2014-01-29 10:40         ` Ian Campbell
2014-01-29 11:38           ` Tim Deegan
2014-01-29 11:41             ` Ian Campbell
2014-01-29 11:48               ` Tim Deegan
2014-01-29 11:51                 ` Ian Campbell
2014-01-30  1:33                   ` Mukesh Rathor
2014-02-09 16:51                     ` Julien Grall
2014-02-10 13:42                       ` Ian Campbell
2014-02-10 15:16                         ` Julien Grall
2014-02-10 15:27                           ` Ian Campbell
2014-02-10 15:33                             ` Julien Grall
2014-02-10 15:37                               ` Ian Campbell
2014-02-20  2:37                               ` Mukesh Rathor
2014-02-20  8:31                                 ` Jan Beulich
2014-02-12 16:47   ` Julien Grall
2014-02-20  2:22     ` Mukesh Rathor
2014-02-20 13:49       ` Julien Grall
2014-02-21  1:22         ` Mukesh Rathor
2014-02-21 23:53           ` Mukesh Rathor
2014-02-22  0:20             ` Julien Grall
2013-12-17  2:38 ` [V7 PATCH 6/7] pvh dom0: Add and remove foreign pages Mukesh Rathor
2013-12-17  2:38 ` [V7 PATCH 7/7] pvh dom0: add opt_dom0pvh to setup.c Mukesh Rathor
2013-12-17 14:46 ` [V7 PATCH 0/7]: PVH dom0 Konrad Rzeszutek Wilk
2013-12-18  0:14   ` Mukesh Rathor
