Xen-Devel Archive on lore.kernel.org
* [Xen-devel] [PATCH v1] x86/mm: Clean IOMMU flags from p2m-pt code
@ 2019-06-18 11:54 Alexandru Stefan ISAILA
  2019-06-25  8:03 ` Alexandru Stefan ISAILA
  0 siblings, 1 reply; 3+ messages in thread
From: Alexandru Stefan ISAILA @ 2019-06-18 11:54 UTC (permalink / raw)
  To: xen-devel
  Cc: wl, george.dunlap, andrew.cooper3, jbeulich,
	Alexandru Stefan ISAILA, roger.pau

At the moment the IOMMU flags are not used in p2m-pt and could be used
in other applications.

This patch aims to clean the use of IOMMU flags on the AMD p2m side.

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
Suggested-by: George Dunlap <george.dunlap@citrix.com>
---
 xen/arch/x86/mm/p2m-pt.c | 85 ++--------------------------------------
 1 file changed, 3 insertions(+), 82 deletions(-)
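
For reference, the iommu_nlevel_to_flags() helper removed below packed the
AMD IOMMU "next level" field into PTE bits 9-11 and the r/w permission bits
into bits 21-22.  A minimal standalone sketch of that encoding (compilable
outside Xen; the IOMMUF_* values are assumed to mirror Xen's definitions):

#include <stdio.h>

/* Assumed to match xen/iommu.h: readable = bit 0, writable = bit 1. */
#define IOMMUF_readable  (1u << 0)
#define IOMMUF_writable  (1u << 1)

/* Same encoding as the macro this patch removes. */
#define iommu_nlevel_to_flags(nl, f) ((((nl) & 0x7) << 9) | (((f) & 0x3) << 21))

int main(void)
{
    unsigned int level = 2;   /* hypothetical next-level value */
    unsigned int flags = iommu_nlevel_to_flags(level,
                                               IOMMUF_readable | IOMMUF_writable);

    printf("encoded flags: %#x\n", flags);   /* prints 0x600400 */
    return 0;
}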

diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index cafc9f299b..ce6d7cdf9b 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -24,7 +24,6 @@
  * along with this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <xen/iommu.h>
 #include <xen/vm_event.h>
 #include <xen/event.h>
 #include <xen/trace.h>
@@ -36,13 +35,12 @@
 #include <asm/p2m.h>
 #include <asm/mem_sharing.h>
 #include <asm/hvm/nestedhvm.h>
-#include <asm/hvm/svm/amd-iommu-proto.h>
 
 #include "mm-locks.h"
 
 /*
  * We may store INVALID_MFN in PTEs.  We need to clip this to avoid trampling
- * over higher-order bits (NX, p2m type, IOMMU flags).  We seem to not need
+ * over higher-order bits (NX, p2m type).  We seem to not need
  * to unclip on the read path, as callers are concerned only with p2m type in
  * such cases.
  */
@@ -165,16 +163,6 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
 // Returns 0 on error.
 //
 
-/* AMD IOMMU: Convert next level bits and r/w bits into 24 bits p2m flags */
-#define iommu_nlevel_to_flags(nl, f) ((((nl) & 0x7) << 9 )|(((f) & 0x3) << 21))
-
-static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry,
-                                unsigned int nlevel, unsigned int flags)
-{
-    if ( iommu_hap_pt_share )
-        l1e_add_flags(*p2m_entry, iommu_nlevel_to_flags(nlevel, flags));
-}
-
 /* Returns: 0 for success, -errno for failure */
 static int
 p2m_next_level(struct p2m_domain *p2m, void **table,
@@ -203,7 +191,6 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
 
         new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
 
-        p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
         if ( rc )
             goto error;
@@ -242,13 +229,6 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
 
         l1_entry = map_domain_page(mfn);
 
-        /* Inherit original IOMMU permissions, but update Next Level. */
-        if ( iommu_hap_pt_share )
-        {
-            flags &= ~iommu_nlevel_to_flags(~0, 0);
-            flags |= iommu_nlevel_to_flags(level - 1, 0);
-        }
-
         for ( i = 0; i < (1u << PAGETABLE_ORDER); i++ )
         {
             new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
@@ -264,8 +244,6 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         unmap_domain_page(l1_entry);
 
         new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
-        p2m_add_iommu_flags(&new_entry, level,
-                            IOMMUF_readable|IOMMUF_writable);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry,
                                   level + 1);
         if ( rc )
@@ -470,9 +448,6 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
             }
 
             e = l1e_from_pfn(mfn, flags);
-            p2m_add_iommu_flags(&e, level,
-                                (nt == p2m_ram_rw)
-                                ? IOMMUF_readable|IOMMUF_writable : 0);
             ASSERT(!needs_recalc(l1, e));
         }
         else
@@ -540,18 +515,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
     l2_pgentry_t l2e_content;
     l3_pgentry_t l3e_content;
     int rc;
-    unsigned int iommu_pte_flags = p2m_get_iommu_flags(p2mt, mfn);
-    /*
-     * old_mfn and iommu_old_flags control possible flush/update needs on the
-     * IOMMU: We need to flush when MFN or flags (i.e. permissions) change.
-     * iommu_old_flags being initialized to zero covers the case of the entry
-     * getting replaced being a non-present (leaf or intermediate) one. For
-     * present leaf entries the real value will get calculated below, while
-     * for present intermediate entries ~0 (guaranteed != iommu_pte_flags)
-     * will be used (to cover all cases of what the leaf entries underneath
-     * the intermediate one might be).
-     */
-    unsigned int flags, iommu_old_flags = 0;
+    unsigned int flags;
     unsigned long old_mfn = mfn_x(INVALID_MFN);
 
     if ( !sve )
@@ -599,17 +563,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         if ( flags & _PAGE_PRESENT )
         {
             if ( flags & _PAGE_PSE )
-            {
                 old_mfn = l1e_get_pfn(*p2m_entry);
-                iommu_old_flags =
-                    p2m_get_iommu_flags(p2m_flags_to_type(flags),
-                                        _mfn(old_mfn));
-            }
             else
-            {
-                iommu_old_flags = ~0;
                 intermediate_entry = *p2m_entry;
-            }
         }
 
         check_entry(mfn, p2mt, p2m_flags_to_type(flags), page_order);
@@ -619,9 +575,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
             : l3e_empty();
         entry_content.l1 = l3e_content.l3;
 
-        if ( entry_content.l1 != 0 )
-            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
-
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
         if ( rc )
@@ -648,9 +601,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
                                    0, L1_PAGETABLE_ENTRIES);
         ASSERT(p2m_entry);
         old_mfn = l1e_get_pfn(*p2m_entry);
-        iommu_old_flags =
-            p2m_get_iommu_flags(p2m_flags_to_type(l1e_get_flags(*p2m_entry)),
-                                _mfn(old_mfn));
 
         if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
             entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
@@ -658,9 +608,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         else
             entry_content = l1e_empty();
 
-        if ( entry_content.l1 != 0 )
-            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
-
         /* level 1 entry */
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -677,17 +624,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         if ( flags & _PAGE_PRESENT )
         {
             if ( flags & _PAGE_PSE )
-            {
                 old_mfn = l1e_get_pfn(*p2m_entry);
-                iommu_old_flags =
-                    p2m_get_iommu_flags(p2m_flags_to_type(flags),
-                                        _mfn(old_mfn));
-            }
             else
-            {
-                iommu_old_flags = ~0;
                 intermediate_entry = *p2m_entry;
-            }
         }
 
         check_entry(mfn, p2mt, p2m_flags_to_type(flags), page_order);
@@ -697,9 +636,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
             : l2e_empty();
         entry_content.l1 = l2e_content.l2;
 
-        if ( entry_content.l1 != 0 )
-            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
-
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
         if ( rc )
@@ -711,24 +647,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
          && (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
         p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
 
-    if ( iommu_enabled && (iommu_old_flags != iommu_pte_flags ||
-                           old_mfn != mfn_x(mfn)) )
-    {
-        ASSERT(rc == 0);
-
-        if ( need_iommu_pt_sync(p2m->domain) )
-            rc = iommu_pte_flags ?
-                iommu_legacy_map(d, _dfn(gfn), mfn, page_order,
-                                 iommu_pte_flags) :
-                iommu_legacy_unmap(d, _dfn(gfn), page_order);
-        else if ( iommu_use_hap_pt(d) && iommu_old_flags )
-            amd_iommu_flush_pages(p2m->domain, gfn, page_order);
-    }
-
     /*
      * Free old intermediate tables if necessary.  This has to be the
-     * last thing we do, after removal from the IOMMU tables, so as to
-     * avoid a potential use-after-free.
+     * last thing we do so as to avoid a potential use-after-free.
      */
     if ( l1e_get_flags(intermediate_entry) & _PAGE_PRESENT )
         p2m_free_entry(p2m, &intermediate_entry, page_order);
-- 
2.17.1

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel


* Re: [Xen-devel] [PATCH v1] x86/mm: Clean IOMMU flags from p2m-pt code
  2019-06-18 11:54 [Xen-devel] [PATCH v1] x86/mm: Clean IOMMU flags from p2m-pt code Alexandru Stefan ISAILA
@ 2019-06-25  8:03 ` Alexandru Stefan ISAILA
  0 siblings, 0 replies; 3+ messages in thread
From: Alexandru Stefan ISAILA @ 2019-06-25  8:03 UTC (permalink / raw)
  To: xen-devel, george.dunlap, andrew.cooper3; +Cc: wl, jbeulich, roger.pau

Are there any thoughts on this patch?

Thanks,
Alex

On 18.06.2019 14:54, Alexandru Stefan ISAILA wrote:
> At the moment the IOMMU flags are not used in p2m-pt and could be used
> in other applications.
> 
> This patch aims to clean the use of IOMMU flags on the AMD p2m side.
> 
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
> Suggested-by: George Dunlap <george.dunlap@citrix.com>
> ---
>   xen/arch/x86/mm/p2m-pt.c | 85 ++--------------------------------------
>   1 file changed, 3 insertions(+), 82 deletions(-)

* Re: [Xen-devel] [PATCH v1] x86/mm: Clean IOMMU flags from p2m-pt code
@ 2019-07-01 13:27 Jan Beulich
  0 siblings, 0 replies; 3+ messages in thread
From: Jan Beulich @ 2019-07-01 13:27 UTC (permalink / raw)
  To: Alexandru Stefan ISAILA
  Cc: George Dunlap, xen-devel, Roger Pau Monne, Wei Liu, Andrew Cooper

>>> On 18.06.19 at 13:54, <aisaila@bitdefender.com> wrote:
> At the moment the IOMMU flags are not used in p2m-pt and could be used
> in other applications.

I don't understand the second half of this sentence. I guess anyway
that you want to at least mention the fact that CPU and IOMMU page
tables can't be shared anymore, ideally citing the commit that made
this impossible.

> This patch aims to clean the use of IOMMU flags on the AMD p2m side.
> 
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
> Suggested-by: George Dunlap <george.dunlap@citrix.com>

Nit: The natural order is the other way around, showing the flow
of events.
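
For illustration, with the tags from this patch that would be:

    Suggested-by: George Dunlap <george.dunlap@citrix.com>
    Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>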

> @@ -36,13 +35,12 @@
>  #include <asm/p2m.h>
>  #include <asm/mem_sharing.h>
>  #include <asm/hvm/nestedhvm.h>
> -#include <asm/hvm/svm/amd-iommu-proto.h>
>  
>  #include "mm-locks.h"
>  
>  /*
>   * We may store INVALID_MFN in PTEs.  We need to clip this to avoid trampling
> - * over higher-order bits (NX, p2m type, IOMMU flags).  We seem to not need
> + * over higher-order bits (NX, p2m type).  We seem to not need
>   * to unclip on the read path, as callers are concerned only with p2m type in
>   * such cases.
>   */

Please re-flow the remainder of the comment.
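
For instance, the remainder might be re-flowed like this (just a sketch):

/*
 * We may store INVALID_MFN in PTEs.  We need to clip this to avoid trampling
 * over higher-order bits (NX, p2m type).  We seem to not need to unclip on
 * the read path, as callers are concerned only with p2m type in such cases.
 */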

> @@ -165,16 +163,6 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t 

Below the previous but ahead of this hunk there's p2m_type_to_flags(),
which has a comment that looks to want adjustment (removal?) as well.

Jan

end of thread

Thread overview: 3+ messages
2019-06-18 11:54 [Xen-devel] [PATCH v1] x86/mm: Clean IOMMU flags from p2m-pt code Alexandru Stefan ISAILA
2019-06-25  8:03 ` Alexandru Stefan ISAILA
2019-07-01 13:27 Jan Beulich
