* [PATCH v2 0/3] x86/p2m: some code simplification
@ 2017-07-05 10:02 Jan Beulich
  2017-07-05 10:05 ` [PATCH v2 1/3] x86/p2m-pt: simplify p2m_next_level() Jan Beulich
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Jan Beulich @ 2017-07-05 10:02 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper

1: simplify p2m_next_level()
2: make p2m_alloc_ptp() return an MFN
3: pass level instead of page type to p2m_next_level()

Signed-off-by: Jan Beulich <jbeulich@suse.com>

See individual patches for change info.



* [PATCH v2 1/3] x86/p2m-pt: simplify p2m_next_level()
  2017-07-05 10:02 [PATCH v2 0/3] x86/p2m: some code simplification Jan Beulich
@ 2017-07-05 10:05 ` Jan Beulich
  2017-07-27 16:19   ` George Dunlap
  2017-07-05 10:05 ` [PATCH v2 2/3] x86/p2m: make p2m_alloc_ptp() return an MFN Jan Beulich
  2017-07-05 10:06 ` [PATCH v2 3/3] x86/p2m-pt: pass level instead of page type to p2m_next_level() Jan Beulich
  2 siblings, 1 reply; 9+ messages in thread
From: Jan Beulich @ 2017-07-05 10:05 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper

Calculate entry PFN and flags just once. Convert the two successive
main if()-s to an if/else-if chain. Restrict variable scope where
reasonable. Take the opportunity and also make the induction variable
unsigned.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Re-do mostly from scratch following review feedback.
Note: I have trouble seeing how the old code worked, when the 2M page
      shattering path specified neither read nor write permission for
      the IOMMU. Am I overlooking a reason why this was (and should
      remain) so?
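
For orientation, the reworked control flow reduces to the shape below
(a simplified sketch of this patch's end result, eliding the
allocation and IOMMU details):

    flags = l1e_get_flags(*p2m_entry);

    if ( !flags )
    {
        /* Not present: allocate a fresh intermediate page table. */
    }
    else if ( flags & _PAGE_PSE )
    {
        /* Superpage: derive "level" from "type", then run one common
         * loop splitting it into (1u << PAGETABLE_ORDER) entries. */
    }
    else
        ASSERT(flags & _PAGE_PRESENT);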

--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -191,18 +191,18 @@ p2m_next_level(struct p2m_domain *p2m, v
                unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
                u32 max, unsigned long type, bool_t unmap)
 {
-    l1_pgentry_t *l1_entry;
-    l1_pgentry_t *p2m_entry;
-    l1_pgentry_t new_entry;
+    l1_pgentry_t *p2m_entry, new_entry;
     void *next;
-    int i;
+    unsigned int flags;
 
     if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
                                       shift, max)) )
         return -ENOENT;
 
+    flags = l1e_get_flags(*p2m_entry);
+
     /* PoD/paging: Not present doesn't imply empty. */
-    if ( !l1e_get_flags(*p2m_entry) )
+    if ( !flags )
     {
         struct page_info *pg;
 
@@ -231,70 +231,61 @@ p2m_next_level(struct p2m_domain *p2m, v
             break;
         }
     }
-
-    ASSERT(l1e_get_flags(*p2m_entry) & (_PAGE_PRESENT|_PAGE_PSE));
-
-    /* split 1GB pages into 2MB pages */
-    if ( type == PGT_l2_page_table && (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
+    else if ( flags & _PAGE_PSE )
     {
-        unsigned long flags, pfn;
+        /* Split superpages into smaller ones. */
+        unsigned long pfn = l1e_get_pfn(*p2m_entry);
         struct page_info *pg;
+        l1_pgentry_t *l1_entry;
+        unsigned int i, level;
 
-        pg = p2m_alloc_ptp(p2m, PGT_l2_page_table);
-        if ( pg == NULL )
-            return -ENOMEM;
-
-        flags = l1e_get_flags(*p2m_entry);
-        pfn = l1e_get_pfn(*p2m_entry);
-
-        l1_entry = __map_domain_page(pg);
-        for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
+        switch ( type )
         {
-            new_entry = l1e_from_pfn(pfn | (i * L1_PAGETABLE_ENTRIES), flags);
-            p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
-            p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 2);
-        }
-        unmap_domain_page(l1_entry);
-        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
-                                 P2M_BASE_FLAGS | _PAGE_RW); /* disable PSE */
-        p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
-        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
-    }
+        case PGT_l2_page_table:
+            level = 2;
+            break;
 
+        case PGT_l1_page_table:
+            /*
+             * New splintered mappings inherit the flags of the old superpage,
+             * with a little reorganisation for the _PAGE_PSE_PAT bit.
+             */
+            if ( pfn & 1 )           /* ==> _PAGE_PSE_PAT was set */
+                pfn -= 1;            /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
+            else
+                flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
 
-    /* split single 2MB large page into 4KB page in P2M table */
-    if ( type == PGT_l1_page_table && (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
-    {
-        unsigned long flags, pfn;
-        struct page_info *pg;
+            level = 1;
+            break;
 
-        pg = p2m_alloc_ptp(p2m, PGT_l1_page_table);
+        default:
+            ASSERT_UNREACHABLE();
+            return -EINVAL;
+        }
+
+        pg = p2m_alloc_ptp(p2m, type);
         if ( pg == NULL )
             return -ENOMEM;
 
-        /* New splintered mappings inherit the flags of the old superpage, 
-         * with a little reorganisation for the _PAGE_PSE_PAT bit. */
-        flags = l1e_get_flags(*p2m_entry);
-        pfn = l1e_get_pfn(*p2m_entry);
-        if ( pfn & 1 )           /* ==> _PAGE_PSE_PAT was set */
-            pfn -= 1;            /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
-        else
-            flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
-        
         l1_entry = __map_domain_page(pg);
-        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+
+        for ( i = 0; i < (1u << PAGETABLE_ORDER); i++ )
         {
-            new_entry = l1e_from_pfn(pfn | i, flags);
-            p2m_add_iommu_flags(&new_entry, 0, 0);
-            p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 1);
+            new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
+                                     flags);
+            p2m_add_iommu_flags(&new_entry, level - 1,
+                                IOMMUF_readable|IOMMUF_writable);
+            p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level);
         }
+
         unmap_domain_page(l1_entry);
-        
         new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
                                  P2M_BASE_FLAGS | _PAGE_RW);
-        p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
-        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
+        p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
+        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
     }
+    else
+        ASSERT(flags & _PAGE_PRESENT);
 
     next = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
     if ( unmap )




* [PATCH v2 2/3] x86/p2m: make p2m_alloc_ptp() return an MFN
  2017-07-05 10:02 [PATCH v2 0/3] x86/p2m: some code simplification Jan Beulich
  2017-07-05 10:05 ` [PATCH v2 1/3] x86/p2m-pt: simplify p2m_next_level() Jan Beulich
@ 2017-07-05 10:05 ` Jan Beulich
  2017-07-05 10:06 ` [PATCH v2 3/3] x86/p2m-pt: pass level instead of page type to p2m_next_level() Jan Beulich
  2 siblings, 0 replies; 9+ messages in thread
From: Jan Beulich @ 2017-07-05 10:05 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper

None of the callers really needs the struct page_info pointer.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
---
v2: Re-base over changes to patch 1.
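
With this conversion every caller follows the same pattern (a sketch
distilled from the hunks below):

    mfn_t mfn = p2m_alloc_ptp(p2m, type);

    if ( mfn_eq(mfn, INVALID_MFN) )
        return -ENOMEM;

    /* The MFN is usable directly; no page_to_mfn() translation. */
    l1_entry = map_domain_page(mfn);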

--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -569,7 +569,7 @@ int p2m_set_entry(struct p2m_domain *p2m
     return rc;
 }
 
-struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type)
+mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type)
 {
     struct page_info *pg;
 
@@ -577,13 +577,13 @@ struct page_info *p2m_alloc_ptp(struct p
     ASSERT(p2m->domain);
     ASSERT(p2m->domain->arch.paging.alloc_page);
     pg = p2m->domain->arch.paging.alloc_page(p2m->domain);
-    if (pg == NULL)
-        return NULL;
+    if ( !pg )
+        return INVALID_MFN;
 
     page_list_add_tail(pg, &p2m->pages);
     pg->u.inuse.type_info = type | 1 | PGT_validated;
 
-    return pg;
+    return page_to_mfn(pg);
 }
 
 void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg)
@@ -609,7 +609,7 @@ void p2m_free_ptp(struct p2m_domain *p2m
  */
 int p2m_alloc_table(struct p2m_domain *p2m)
 {
-    struct page_info *p2m_top;
+    mfn_t top_mfn;
     struct domain *d = p2m->domain;
     int rc = 0;
 
@@ -632,14 +632,14 @@ int p2m_alloc_table(struct p2m_domain *p
 
     P2M_PRINTK("allocating p2m table\n");
 
-    p2m_top = p2m_alloc_ptp(p2m, PGT_l4_page_table);
-    if ( p2m_top == NULL )
+    top_mfn = p2m_alloc_ptp(p2m, PGT_l4_page_table);
+    if ( mfn_eq(top_mfn, INVALID_MFN) )
     {
         p2m_unlock(p2m);
         return -ENOMEM;
     }
 
-    p2m->phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
+    p2m->phys_table = pagetable_from_mfn(top_mfn);
 
     if ( hap_enabled(d) )
         iommu_share_p2m_table(d);
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -225,16 +225,16 @@ static void ept_p2m_type_to_flags(struct
 /* Fill in middle levels of ept table */
 static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
 {
-    struct page_info *pg;
+    mfn_t mfn;
     ept_entry_t *table;
     unsigned int i;
 
-    pg = p2m_alloc_ptp(p2m, 0);
-    if ( pg == NULL )
+    mfn = p2m_alloc_ptp(p2m, 0);
+    if ( mfn_eq(mfn, INVALID_MFN) )
         return 0;
 
     ept_entry->epte = 0;
-    ept_entry->mfn = page_to_mfn(pg);
+    ept_entry->mfn = mfn_x(mfn);
     ept_entry->access = p2m->default_access;
 
     ept_entry->r = ept_entry->w = ept_entry->x = 1;
@@ -243,7 +243,7 @@ static int ept_set_middle_entry(struct p
 
     ept_entry->suppress_ve = 1;
 
-    table = __map_domain_page(pg);
+    table = map_domain_page(mfn);
 
     for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
         table[i].suppress_ve = 1;
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -204,14 +204,12 @@ p2m_next_level(struct p2m_domain *p2m, v
     /* PoD/paging: Not present doesn't imply empty. */
     if ( !flags )
     {
-        struct page_info *pg;
+        mfn_t mfn = p2m_alloc_ptp(p2m, type);
 
-        pg = p2m_alloc_ptp(p2m, type);
-        if ( pg == NULL )
+        if ( mfn_eq(mfn, INVALID_MFN) )
             return -ENOMEM;
 
-        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
-                                 P2M_BASE_FLAGS | _PAGE_RW);
+        new_entry = l1e_from_pfn(mfn_x(mfn), P2M_BASE_FLAGS | _PAGE_RW);
 
         switch ( type ) {
         case PGT_l3_page_table:
@@ -235,7 +233,7 @@ p2m_next_level(struct p2m_domain *p2m, v
     {
        /* Split superpages into smaller ones. */
         unsigned long pfn = l1e_get_pfn(*p2m_entry);
-        struct page_info *pg;
+        mfn_t mfn;
         l1_pgentry_t *l1_entry;
         unsigned int i, level;
 
@@ -263,11 +261,11 @@ p2m_next_level(struct p2m_domain *p2m, v
             return -EINVAL;
         }
 
-        pg = p2m_alloc_ptp(p2m, type);
-        if ( pg == NULL )
+        mfn = p2m_alloc_ptp(p2m, type);
+        if ( mfn_eq(mfn, INVALID_MFN) )
             return -ENOMEM;
 
-        l1_entry = __map_domain_page(pg);
+        l1_entry = map_domain_page(mfn);
 
         for ( i = 0; i < (1u << PAGETABLE_ORDER); i++ )
         {
@@ -279,8 +277,7 @@ p2m_next_level(struct p2m_domain *p2m, v
         }
 
         unmap_domain_page(l1_entry);
-        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
-                                 P2M_BASE_FLAGS | _PAGE_RW);
+        new_entry = l1e_from_pfn(mfn_x(mfn), P2M_BASE_FLAGS | _PAGE_RW);
         p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
     }
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -684,7 +684,7 @@ void p2m_mem_paging_resume(struct domain
  * Internal functions, only called by other p2m code
  */
 
-struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
+mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
 void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg);
 
 /* Directly set a p2m entry: only for use by p2m code. Does not need




* [PATCH v2 3/3] x86/p2m-pt: pass level instead of page type to p2m_next_level()
  2017-07-05 10:02 [PATCH v2 0/3] x86/p2m: some code simplification Jan Beulich
  2017-07-05 10:05 ` [PATCH v2 1/3] x86/p2m-pt: simplify p2m_next_level() Jan Beulich
  2017-07-05 10:05 ` [PATCH v2 2/3] x86/p2m: make p2m_alloc_ptp() return an MFN Jan Beulich
@ 2017-07-05 10:06 ` Jan Beulich
  2017-07-27 16:50   ` George Dunlap
  2 siblings, 1 reply; 9+ messages in thread
From: Jan Beulich @ 2017-07-05 10:06 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper

This in turn calls for p2m_alloc_ptp() also being passed the numeric
level.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: New.
---
Question is whether passing the level to p2m_alloc_ptp() is really all
that useful: p2m-ept.c's only use passes zero anyway, and p2m.c's
uniform passing of 4 doesn't necessarily match reality afaict.
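
The level-to-type conversion below relies on the PGT_l<N>_page_table
constants scaling linearly with N, which the new BUILD_BUG_ON()s check
at compile time. In essence (sketch of the resulting allocator core):

    /* High bits: PGT_* type and PGT_validated; the low bits hold the
     * type count, so "| 1" pins an initial type reference. */
    pg->u.inuse.type_info = (PGT_l1_page_table * level) | 1 | PGT_validated;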

--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -569,7 +569,7 @@ int p2m_set_entry(struct p2m_domain *p2m
     return rc;
 }
 
-mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type)
+mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned int level)
 {
     struct page_info *pg;
 
@@ -581,7 +581,10 @@ mfn_t p2m_alloc_ptp(struct p2m_domain *p
         return INVALID_MFN;
 
     page_list_add_tail(pg, &p2m->pages);
-    pg->u.inuse.type_info = type | 1 | PGT_validated;
+    BUILD_BUG_ON(PGT_l1_page_table * 2 != PGT_l2_page_table);
+    BUILD_BUG_ON(PGT_l1_page_table * 3 != PGT_l3_page_table);
+    BUILD_BUG_ON(PGT_l1_page_table * 4 != PGT_l4_page_table);
+    pg->u.inuse.type_info = (PGT_l1_page_table * level) | 1 | PGT_validated;
 
     return page_to_mfn(pg);
 }
@@ -632,7 +635,7 @@ int p2m_alloc_table(struct p2m_domain *p
 
     P2M_PRINTK("allocating p2m table\n");
 
-    top_mfn = p2m_alloc_ptp(p2m, PGT_l4_page_table);
+    top_mfn = p2m_alloc_ptp(p2m, 4);
     if ( mfn_eq(top_mfn, INVALID_MFN) )
     {
         p2m_unlock(p2m);
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -71,12 +71,6 @@
 #define needs_recalc(level, ent) _needs_recalc(level##e_get_flags(ent))
 #define valid_recalc(level, ent) (!(level##e_get_flags(ent) & _PAGE_ACCESSED))
 
-static const unsigned long pgt[] = {
-    PGT_l1_page_table,
-    PGT_l2_page_table,
-    PGT_l3_page_table
-};
-
 static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
                                        p2m_type_t t,
                                        mfn_t mfn,
@@ -189,7 +183,7 @@ static void p2m_add_iommu_flags(l1_pgent
 static int
 p2m_next_level(struct p2m_domain *p2m, void **table,
                unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
-               u32 max, unsigned long type, bool_t unmap)
+               u32 max, unsigned int level, bool_t unmap)
 {
     l1_pgentry_t *p2m_entry, new_entry;
     void *next;
@@ -204,30 +198,15 @@ p2m_next_level(struct p2m_domain *p2m, v
     /* PoD/paging: Not present doesn't imply empty. */
     if ( !flags )
     {
-        mfn_t mfn = p2m_alloc_ptp(p2m, type);
+        mfn_t mfn = p2m_alloc_ptp(p2m, level);
 
         if ( mfn_eq(mfn, INVALID_MFN) )
             return -ENOMEM;
 
         new_entry = l1e_from_pfn(mfn_x(mfn), P2M_BASE_FLAGS | _PAGE_RW);
 
-        switch ( type ) {
-        case PGT_l3_page_table:
-            p2m_add_iommu_flags(&new_entry, 3, IOMMUF_readable|IOMMUF_writable);
-            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 4);
-            break;
-        case PGT_l2_page_table:
-            p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
-            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
-            break;
-        case PGT_l1_page_table:
-            p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
-            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
-            break;
-        default:
-            BUG();
-            break;
-        }
+        p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
+        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
     }
     else if ( flags & _PAGE_PSE )
     {
@@ -235,15 +214,14 @@ p2m_next_level(struct p2m_domain *p2m, v
         unsigned long pfn = l1e_get_pfn(*p2m_entry);
         mfn_t mfn;
         l1_pgentry_t *l1_entry;
-        unsigned int i, level;
+        unsigned int i;
 
-        switch ( type )
+        switch ( level )
         {
-        case PGT_l2_page_table:
-            level = 2;
+        case 2:
             break;
 
-        case PGT_l1_page_table:
+        case 1:
             /*
              * New splintered mappings inherit the flags of the old superpage,
              * with a little reorganisation for the _PAGE_PSE_PAT bit.
@@ -252,8 +230,6 @@ p2m_next_level(struct p2m_domain *p2m, v
                 pfn -= 1;            /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
             else
                 flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
-
-            level = 1;
             break;
 
         default:
@@ -261,7 +237,7 @@ p2m_next_level(struct p2m_domain *p2m, v
             return -EINVAL;
         }
 
-        mfn = p2m_alloc_ptp(p2m, type);
+        mfn = p2m_alloc_ptp(p2m, level);
         if ( mfn_eq(mfn, INVALID_MFN) )
             return -ENOMEM;
 
@@ -325,7 +301,7 @@ static int p2m_pt_set_recalc_range(struc
 
         err = p2m_next_level(p2m, &table, &gfn_remainder, first_gfn,
                              i * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER,
-                             pgt[i - 1], 1);
+                             i, 1);
         if ( err )
             goto out;
     }
@@ -393,7 +369,7 @@ static int do_recalc(struct p2m_domain *
 
         err = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                              level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER,
-                             pgt[level - 1], 0);
+                             level, 0);
         if ( err )
             goto out;
 
@@ -557,7 +533,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
     table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
     rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                         L4_PAGETABLE_SHIFT - PAGE_SHIFT,
-                        L4_PAGETABLE_ENTRIES, PGT_l3_page_table, 1);
+                        L4_PAGETABLE_ENTRIES, 3, 1);
     if ( rc )
         goto out;
 
@@ -605,7 +581,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
     {
         rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                             L3_PAGETABLE_SHIFT - PAGE_SHIFT,
-                            L3_PAGETABLE_ENTRIES, PGT_l2_page_table, 1);
+                            L3_PAGETABLE_ENTRIES, 2, 1);
         if ( rc )
             goto out;
     }
@@ -616,7 +592,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
 
         rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                             L2_PAGETABLE_SHIFT - PAGE_SHIFT,
-                            L2_PAGETABLE_ENTRIES, PGT_l1_page_table, 1);
+                            L2_PAGETABLE_ENTRIES, 1, 1);
         if ( rc )
             goto out;
 
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -684,7 +684,7 @@ void p2m_mem_paging_resume(struct domain
  * Internal functions, only called by other p2m code
  */
 
-mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
+mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned int level);
 void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg);
 
 /* Directly set a p2m entry: only for use by p2m code. Does not need




* Re: [PATCH v2 1/3] x86/p2m-pt: simplify p2m_next_level()
  2017-07-05 10:05 ` [PATCH v2 1/3] x86/p2m-pt: simplify p2m_next_level() Jan Beulich
@ 2017-07-27 16:19   ` George Dunlap
  2017-08-11 11:53     ` Jan Beulich
  2017-08-11 12:28     ` Jan Beulich
  0 siblings, 2 replies; 9+ messages in thread
From: George Dunlap @ 2017-07-27 16:19 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Andrew Cooper

On Wed, Jul 5, 2017 at 11:05 AM, Jan Beulich <JBeulich@suse.com> wrote:
> Calculate entry PFN and flags just once. Convert the two successive
> main if()-s to an if/else-if chain. Restrict variable scope where
> reasonable. Take the opportunity and also make the induction variable
> unsigned.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v2: Re-do mostly from scratch following review feedback.
> Note: I have trouble seeing how the old code worked, when the 2M page
>       shattering path specified neither read nor write permission for
>       the IOMMU. Am I overlooking a reason why this was (and should
>       remain) so?

Hmm -- given that in all other circumstances, a 4k page which is
ram_rw gets RW, then I think the old code must certainly be buggy.

But is your code correct?  It looks like you unconditionally give it
RW, when for ram_ro, for example, it should be R (not W).  It seems
like we should either call p2m_get_iommu_flags(), or ASSERT() that the
resulting flags would be RW.
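
A sketch of the ASSERT() variant (hypothetical; the helper names
follow existing p2m-pt.c ones, but their exact signatures are assumed
here rather than verified):

    /* Only hard-code RW for the IOMMU if the superpage's p2m type
     * would yield RW anyway. */
    ASSERT(p2m_get_iommu_flags(p2m_flags_to_type(flags)) ==
           (IOMMUF_readable | IOMMUF_writable));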

Everything else looks good, thanks.

 -George


* Re: [PATCH v2 3/3] x86/p2m-pt: pass level instead of page type to p2m_next_level()
  2017-07-05 10:06 ` [PATCH v2 3/3] x86/p2m-pt: pass level instead of page type to p2m_next_level() Jan Beulich
@ 2017-07-27 16:50   ` George Dunlap
  2017-08-11 13:07     ` Jan Beulich
  0 siblings, 1 reply; 9+ messages in thread
From: George Dunlap @ 2017-07-27 16:50 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Andrew Cooper

On Wed, Jul 5, 2017 at 11:06 AM, Jan Beulich <JBeulich@suse.com> wrote:
> This in turn calls for p2m_alloc_ptp() also being passed the numeric
> level.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v2: New.
> ---
> Question is whether passing the level to p2m_alloc_ptp() is really all
> that useful: p2m-ept.c's only use passes zero anyway, and p2m.c's
> uniform passing of 4 doesn't necessarily match reality afaict.

I agree that we should either fix it properly (so that it reflects
reality), or make it always be the same value.

Well the original reason for keeping track of the different paging
levels in type_info was for PV pagetables, right?  I can't off the top
of my head think of a reason that it's important to keep track of the
different levels for p2m tables.

It probably *is* good to prevent such a page from winding up in a
writeable entry of a PV guest; so maybe following p2m.c's lead and
always setting it to PGT_l4_page_table?
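
Concretely, that suggestion would amount to something like
(hypothetical sketch, not tested):

    /* Stamp every p2m page as an L4 table, purely to keep it out of
     * writeable PV pagetable entries. */
    pg->u.inuse.type_info = PGT_l4_page_table | 1 | PGT_validated;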

Other than that...


>          new_entry = l1e_from_pfn(mfn_x(mfn), P2M_BASE_FLAGS | _PAGE_RW);
>
> -        switch ( type ) {
> -        case PGT_l3_page_table:
> -            p2m_add_iommu_flags(&new_entry, 3, IOMMUF_readable|IOMMUF_writable);
> -            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 4);
> -            break;
> -        case PGT_l2_page_table:
> -            p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
> -            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
> -            break;
> -        case PGT_l1_page_table:
> -            p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
> -            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
> -            break;
> -        default:
> -            BUG();
> -            break;
> -        }
> +        p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
> +        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);

...this looks *much better*, thanks!

Reviewed-by: George Dunlap <george.dunlap@citrix.com>


* Re: [PATCH v2 1/3] x86/p2m-pt: simplify p2m_next_level()
  2017-07-27 16:19   ` George Dunlap
@ 2017-08-11 11:53     ` Jan Beulich
  2017-08-11 12:28     ` Jan Beulich
  1 sibling, 0 replies; 9+ messages in thread
From: Jan Beulich @ 2017-08-11 11:53 UTC (permalink / raw)
  To: George Dunlap; +Cc: Andrew Cooper, xen-devel

>>> On 27.07.17 at 18:19, <George.Dunlap@eu.citrix.com> wrote:
> On Wed, Jul 5, 2017 at 11:05 AM, Jan Beulich <JBeulich@suse.com> wrote:
>> Calculate entry PFN and flags just once. Convert the two successive
>> main if()-s to an if/else-if chain. Restrict variable scope where
>> reasonable. Take the opportunity and also make the induction variable
>> unsigned.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> ---
>> v2: Re-do mostly from scratch following review feedback.
>> Note: I have trouble seeing how the old code worked, when the 2M page
>>       shattering path specified neither read nor write permission for
>>       the IOMMU. Am I overlooking a reason why this was (and should
>>       remain) so?
> 
> Hmm -- given that in all other circumstances, a 4k page which is
> ram_rw gets RW, then I think the old code must certainly be buggy.
> 
> But is your code correct?  It looks like you unconditionally give it
> RW, when for ram_ro, for example it should be R (not W).  It seems
> like we should either call p2m_get_iommu_flags(), or ASSERT() that the
> resulting flags would be RW.

Hmm, good point, but that means the original code had the same
issue when splitting 1G into 2M pages. I'd prefer to not call
p2m_get_iommu_flags() though, but instead simply inherit the
IOMMU flags from the original large page.
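
Sketched out, that inheritance idea would make the splitting loop look
something like this (hypothetical, not the final patch):

    for ( i = 0; i < (1u << PAGETABLE_ORDER); i++ )
    {
        new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
                                 flags);
        /* No p2m_add_iommu_flags() here: "flags" was copied from the
         * original superpage entry, so any IOMMU bits in it are
         * inherited rather than re-derived. */
        p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level);
    }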

Jan



* Re: [PATCH v2 1/3] x86/p2m-pt: simplify p2m_next_level()
  2017-07-27 16:19   ` George Dunlap
  2017-08-11 11:53     ` Jan Beulich
@ 2017-08-11 12:28     ` Jan Beulich
  1 sibling, 0 replies; 9+ messages in thread
From: Jan Beulich @ 2017-08-11 12:28 UTC (permalink / raw)
  To: George Dunlap; +Cc: Andrew Cooper, xen-devel

>>> On 27.07.17 at 18:19, <George.Dunlap@eu.citrix.com> wrote:
> On Wed, Jul 5, 2017 at 11:05 AM, Jan Beulich <JBeulich@suse.com> wrote:
>> Calculate entry PFN and flags just once. Convert the two successive
>> main if()-s to an if/else-if chain. Restrict variable scope where
>> reasonable. Take the opportunity and also make the induction variable
>> unsigned.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> ---
>> v2: Re-do mostly from scratch following review feedback.
>> Note: I have trouble seeing how the old code worked, when the 2M page
>>       shattering path specified neither read nor write permission for
>>       the IOMMU. Am I overlooking a reason why this was (and should
>>       remain) so?
> 
> Hmm -- given that in all other circumstances, a 4k page which is
> ram_rw gets RW, then I think the old code must certainly be buggy.

Oh, I think this is buggy in a different way than I first thought: it
looks like the code is meant to inherit the original flags (as they're
not being cleared from "flags"), but afaict that breaks the next level
field - that one is also inherited, and the new next level value then
gets OR-ed into it. I suppose all of this has not caused issues simply
because we've not allowed shared page tables on AMD for quite a while
(and both fields are of no interest in the shadow mode case).

I'll drop the use of p2m_add_iommu_flags() in that particular
case altogether.

Jan



* Re: [PATCH v2 3/3] x86/p2m-pt: pass level instead of page type to p2m_next_level()
  2017-07-27 16:50   ` George Dunlap
@ 2017-08-11 13:07     ` Jan Beulich
  0 siblings, 0 replies; 9+ messages in thread
From: Jan Beulich @ 2017-08-11 13:07 UTC (permalink / raw)
  To: George Dunlap; +Cc: Andrew Cooper, xen-devel

>>> On 27.07.17 at 18:50, <George.Dunlap@eu.citrix.com> wrote:
> On Wed, Jul 5, 2017 at 11:06 AM, Jan Beulich <JBeulich@suse.com> wrote:
>> This in turn calls for p2m_alloc_ptp() also being passed the numeric
>> level.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> ---
>> v2: New.
>> ---
>> Question is whether passing the level to p2m_alloc_ptp() is really all
>> that useful: p2m-ept.c's only use passes zero anyway, and p2m.c's
>> uniform passing of 4 doesn't necessarily match reality afaict.
> 
> I agree that we should either fix it properly (so that it reflects
> reality), or make it always be the same value.
> 
> Well the original reason for keeping track of the different paging
> levels in type_info was for PV pagetables, right?  I can't off the top
> of my head think of a reason that it's important to keep track of the
> different levels for p2m tables.
> 
> It probably *is* good to prevent such a page from winding up in a
> writeable entry of a PV guest; so maybe following p2m.c's lead and
> always setting it to PGT_l4_page_table?

Yeah, probably that's the safest we can do.

> Other than that...
> 
> 
>>          new_entry = l1e_from_pfn(mfn_x(mfn), P2M_BASE_FLAGS | _PAGE_RW);
>>
>> -        switch ( type ) {
>> -        case PGT_l3_page_table:
>> -            p2m_add_iommu_flags(&new_entry, 3, IOMMUF_readable|IOMMUF_writable);
>> -            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 4);
>> -            break;
>> -        case PGT_l2_page_table:
>> -            p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
>> -            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
>> -            break;
>> -        case PGT_l1_page_table:
>> -            p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
>> -            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
>> -            break;
>> -        default:
>> -            BUG();
>> -            break;
>> -        }
>> +        p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
>> +        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
> 
> ...this looks *much better*, thanks!
> 
> Reviewed-by: George Dunlap <george.dunlap@citrix.com>

Thanks!

Jan


