xen-devel.lists.xenproject.org archive mirror
* [Xen-devel] [PATCH v3 1/2] x86/mm: Clean IOMMU flags from p2m-pt code
@ 2019-07-16 12:01 Alexandru Stefan ISAILA
From: Alexandru Stefan ISAILA @ 2019-07-16 12:01 UTC (permalink / raw)
  To: xen-devel
  Cc: jbeulich, wl, george.dunlap, andrew.cooper3,
	suravee.suthikulpanit, Alexandru Stefan ISAILA, brian.woods,
	roger.pau

At this moment, IOMMU page-table sharing is disabled by commit [1].

This patch removes the IOMMU HAP page-table sharing support, as it will
not be used in the future. By doing this, the IOMMU bits used in
pte[52:58] become free for other uses.

[1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555
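
For illustration, a minimal standalone sketch (not Xen source; the helper
names are hypothetical) of why those bits matter. In the p2m-pt code the
24-bit software flags value is split across the PTE, assuming the usual
layout where the low 12 bits map to pte[0:11] and the high 12 bits to
pte[52:63]; the "(t & 0x7f) << 12" in p2m_type_to_flags() therefore lands
the 7-bit p2m type in pte[52:58], the same region the AMD IOMMU flags
competed for:

#include <stdint.h>

/* Sketch of a put_pte_flags()/get_pte_flags() style split, assuming
 * flags bits 0-11 map to pte[0:11] and bits 12-23 to pte[52:63]. */
static inline uint64_t put_pte_flags_sketch(uint32_t flags24)
{
    return ((uint64_t)(flags24 & 0xfff000) << 40) | (flags24 & 0xfff);
}

static inline uint32_t get_pte_flags_sketch(uint64_t pte)
{
    return (uint32_t)((pte >> 40) & 0xfff000) | (uint32_t)(pte & 0xfff);
}

/* A p2m type t occupies flags bits 12-18, i.e. pte[52:58]. */
static inline uint64_t p2m_type_bits_sketch(unsigned int t)
{
    return put_pte_flags_sketch(((uint32_t)t & 0x7f) << 12);
}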

Suggested-by: George Dunlap <george.dunlap@citrix.com>
Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

---
Changes since V1:
	- Rework commit message
	- Reflow comments
	- Move flags init to declaration in p2m_type_to_flags.
---
 xen/arch/x86/mm/p2m-pt.c | 96 +++-------------------------------------
 1 file changed, 5 insertions(+), 91 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index cafc9f299b..3a0a500d66 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -24,7 +24,6 @@
  * along with this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <xen/iommu.h>
 #include <xen/vm_event.h>
 #include <xen/event.h>
 #include <xen/trace.h>
@@ -36,15 +35,13 @@
 #include <asm/p2m.h>
 #include <asm/mem_sharing.h>
 #include <asm/hvm/nestedhvm.h>
-#include <asm/hvm/svm/amd-iommu-proto.h>
 
 #include "mm-locks.h"
 
 /*
  * We may store INVALID_MFN in PTEs.  We need to clip this to avoid trampling
- * over higher-order bits (NX, p2m type, IOMMU flags).  We seem to not need
- * to unclip on the read path, as callers are concerned only with p2m type in
- * such cases.
+ * over higher-order bits (NX, p2m type). We seem to not need to unclip on the
+ * read path, as callers are concerned only with p2m type in such cases.
  */
 #define p2m_l1e_from_pfn(pfn, flags)    \
     l1e_from_pfn((pfn) & (PADDR_MASK >> PAGE_SHIFT), (flags))
@@ -71,13 +68,7 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
                                        mfn_t mfn,
                                        unsigned int level)
 {
-    unsigned long flags;
-    /*
-     * AMD IOMMU: When we share p2m table with iommu, bit 9 - bit 11 will be
-     * used for iommu hardware to encode next io page level. Bit 59 - bit 62
-     * are used for iommu flags, We could not use these bits to store p2m types.
-     */
-    flags = (unsigned long)(t & 0x7f) << 12;
+    unsigned long flags = (unsigned long)(t & 0x7f) << 12;
 
     switch(t)
     {
@@ -165,16 +156,6 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
 // Returns 0 on error.
 //
 
-/* AMD IOMMU: Convert next level bits and r/w bits into 24 bits p2m flags */
-#define iommu_nlevel_to_flags(nl, f) ((((nl) & 0x7) << 9 )|(((f) & 0x3) << 21))
-
-static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry,
-                                unsigned int nlevel, unsigned int flags)
-{
-    if ( iommu_hap_pt_share )
-        l1e_add_flags(*p2m_entry, iommu_nlevel_to_flags(nlevel, flags));
-}
-
 /* Returns: 0 for success, -errno for failure */
 static int
 p2m_next_level(struct p2m_domain *p2m, void **table,
@@ -203,7 +184,6 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
 
         new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
 
-        p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
         if ( rc )
             goto error;
@@ -242,13 +222,6 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
 
         l1_entry = map_domain_page(mfn);
 
-        /* Inherit original IOMMU permissions, but update Next Level. */
-        if ( iommu_hap_pt_share )
-        {
-            flags &= ~iommu_nlevel_to_flags(~0, 0);
-            flags |= iommu_nlevel_to_flags(level - 1, 0);
-        }
-
         for ( i = 0; i < (1u << PAGETABLE_ORDER); i++ )
         {
             new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
@@ -264,8 +237,6 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         unmap_domain_page(l1_entry);
 
         new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
-        p2m_add_iommu_flags(&new_entry, level,
-                            IOMMUF_readable|IOMMUF_writable);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry,
                                   level + 1);
         if ( rc )
@@ -470,9 +441,6 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
             }
 
             e = l1e_from_pfn(mfn, flags);
-            p2m_add_iommu_flags(&e, level,
-                                (nt == p2m_ram_rw)
-                                ? IOMMUF_readable|IOMMUF_writable : 0);
             ASSERT(!needs_recalc(l1, e));
         }
         else
@@ -540,18 +508,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
     l2_pgentry_t l2e_content;
     l3_pgentry_t l3e_content;
     int rc;
-    unsigned int iommu_pte_flags = p2m_get_iommu_flags(p2mt, mfn);
-    /*
-     * old_mfn and iommu_old_flags control possible flush/update needs on the
-     * IOMMU: We need to flush when MFN or flags (i.e. permissions) change.
-     * iommu_old_flags being initialized to zero covers the case of the entry
-     * getting replaced being a non-present (leaf or intermediate) one. For
-     * present leaf entries the real value will get calculated below, while
-     * for present intermediate entries ~0 (guaranteed != iommu_pte_flags)
-     * will be used (to cover all cases of what the leaf entries underneath
-     * the intermediate one might be).
-     */
-    unsigned int flags, iommu_old_flags = 0;
+    unsigned int flags;
     unsigned long old_mfn = mfn_x(INVALID_MFN);
 
     if ( !sve )
@@ -599,17 +556,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         if ( flags & _PAGE_PRESENT )
         {
             if ( flags & _PAGE_PSE )
-            {
                 old_mfn = l1e_get_pfn(*p2m_entry);
-                iommu_old_flags =
-                    p2m_get_iommu_flags(p2m_flags_to_type(flags),
-                                        _mfn(old_mfn));
-            }
             else
-            {
-                iommu_old_flags = ~0;
                 intermediate_entry = *p2m_entry;
-            }
         }
 
         check_entry(mfn, p2mt, p2m_flags_to_type(flags), page_order);
@@ -619,9 +568,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
             : l3e_empty();
         entry_content.l1 = l3e_content.l3;
 
-        if ( entry_content.l1 != 0 )
-            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
-
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
         if ( rc )
@@ -648,9 +594,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
                                    0, L1_PAGETABLE_ENTRIES);
         ASSERT(p2m_entry);
         old_mfn = l1e_get_pfn(*p2m_entry);
-        iommu_old_flags =
-            p2m_get_iommu_flags(p2m_flags_to_type(l1e_get_flags(*p2m_entry)),
-                                _mfn(old_mfn));
 
         if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
             entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
@@ -658,9 +601,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         else
             entry_content = l1e_empty();
 
-        if ( entry_content.l1 != 0 )
-            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
-
         /* level 1 entry */
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -677,17 +617,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         if ( flags & _PAGE_PRESENT )
         {
             if ( flags & _PAGE_PSE )
-            {
                 old_mfn = l1e_get_pfn(*p2m_entry);
-                iommu_old_flags =
-                    p2m_get_iommu_flags(p2m_flags_to_type(flags),
-                                        _mfn(old_mfn));
-            }
             else
-            {
-                iommu_old_flags = ~0;
                 intermediate_entry = *p2m_entry;
-            }
         }
 
         check_entry(mfn, p2mt, p2m_flags_to_type(flags), page_order);
@@ -697,9 +629,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
             : l2e_empty();
         entry_content.l1 = l2e_content.l2;
 
-        if ( entry_content.l1 != 0 )
-            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
-
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
         if ( rc )
@@ -711,24 +640,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
          && (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
         p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
 
-    if ( iommu_enabled && (iommu_old_flags != iommu_pte_flags ||
-                           old_mfn != mfn_x(mfn)) )
-    {
-        ASSERT(rc == 0);
-
-        if ( need_iommu_pt_sync(p2m->domain) )
-            rc = iommu_pte_flags ?
-                iommu_legacy_map(d, _dfn(gfn), mfn, page_order,
-                                 iommu_pte_flags) :
-                iommu_legacy_unmap(d, _dfn(gfn), page_order);
-        else if ( iommu_use_hap_pt(d) && iommu_old_flags )
-            amd_iommu_flush_pages(p2m->domain, gfn, page_order);
-    }
-
     /*
      * Free old intermediate tables if necessary.  This has to be the
-     * last thing we do, after removal from the IOMMU tables, so as to
-     * avoid a potential use-after-free.
+     * last thing we do so as to avoid a potential use-after-free.
      */
     if ( l1e_get_flags(intermediate_entry) & _PAGE_PRESENT )
         p2m_free_entry(p2m, &intermediate_entry, page_order);
-- 
2.17.1
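
As context for the "clip" comment in the hunk above: INVALID_MFN is an
all-ones value, so storing it unclipped would spill into pte[52:63] and
trample the NX and p2m-type bits. A minimal standalone sketch (names
hypothetical) of what the mask in p2m_l1e_from_pfn() achieves:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define PADDR_BITS   52
#define PADDR_MASK   ((1ULL << PADDR_BITS) - 1)
#define INVALID_MFN  (~0ULL)

/* Clip the frame number to the physical-address width so an all-ones
 * INVALID_MFN cannot reach pte[52:63] (NX, p2m type). */
static uint64_t p2m_l1e_from_pfn_sketch(uint64_t pfn, uint64_t flags)
{
    return ((pfn & (PADDR_MASK >> PAGE_SHIFT)) << PAGE_SHIFT) | flags;
}

int main(void)
{
    /* Prints 0xffffffffff000: address bits only, pte[52:63] left clear. */
    printf("%#llx\n",
           (unsigned long long)p2m_l1e_from_pfn_sketch(INVALID_MFN, 0));
    return 0;
}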



* [Xen-devel] [PATCH v3 2/2] passthrough/amd: Clean iommu_hap_pt_share enabled code
From: Alexandru Stefan ISAILA @ 2019-07-16 12:01 UTC (permalink / raw)
  To: xen-devel
  Cc: jbeulich, wl, george.dunlap, andrew.cooper3,
	suravee.suthikulpanit, Alexandru Stefan ISAILA, brian.woods,
	roger.pau

At this moment, IOMMU page-table sharing is disabled by commit [1].

This patch removes the unreachable code guarded by iommu_hap_pt_share.

[1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555
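
For illustration, a minimal sketch (hypothetical names, not the Xen
source) of the pattern this removes: once the sharing predicate can only
evaluate to false, the guarded early return is dead code:

#include <stdbool.h>

/* With page-table sharing permanently disabled, the predicate is a
 * constant false. */
static bool iommu_use_hap_pt_sketch(void)
{
    return false;
}

static int amd_iommu_map_page_sketch(void)
{
    if ( iommu_use_hap_pt_sketch() )  /* dead branch, safe to delete */
        return 0;

    /* ... the real mapping work continues here ... */
    return 0;
}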

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
---
 xen/drivers/passthrough/amd/iommu_map.c       | 28 -------------------
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |  4 ---
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |  3 --
 3 files changed, 35 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index cbf00e9e72..90cc7075c2 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -364,9 +364,6 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
     int rc;
     unsigned long pt_mfn[7];
 
-    if ( iommu_use_hap_pt(d) )
-        return 0;
-
     memset(pt_mfn, 0, sizeof(pt_mfn));
 
     spin_lock(&hd->arch.mapping_lock);
@@ -420,9 +417,6 @@ int amd_iommu_unmap_page(struct domain *d, dfn_t dfn,
     unsigned long pt_mfn[7];
     struct domain_iommu *hd = dom_iommu(d);
 
-    if ( iommu_use_hap_pt(d) )
-        return 0;
-
     memset(pt_mfn, 0, sizeof(pt_mfn));
 
     spin_lock(&hd->arch.mapping_lock);
@@ -558,28 +552,6 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain,
     return rt;
 }
 
-/* Share p2m table with iommu. */
-void amd_iommu_share_p2m(struct domain *d)
-{
-    struct domain_iommu *hd = dom_iommu(d);
-    struct page_info *p2m_table;
-    mfn_t pgd_mfn;
-
-    pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
-    p2m_table = mfn_to_page(pgd_mfn);
-
-    if ( hd->arch.root_table != p2m_table )
-    {
-        free_amd_iommu_pgtable(hd->arch.root_table);
-        hd->arch.root_table = p2m_table;
-
-        /* When sharing p2m with iommu, paging mode = 4 */
-        hd->arch.paging_mode = 4;
-        AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = %#lx\n",
-                        mfn_x(pgd_mfn));
-    }
-}
-
 /*
  * Local variables:
  * mode: C
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 4afbcd1609..be076210b6 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -396,9 +396,6 @@ static void deallocate_iommu_page_tables(struct domain *d)
 {
     struct domain_iommu *hd = dom_iommu(d);
 
-    if ( iommu_use_hap_pt(d) )
-        return;
-
     spin_lock(&hd->arch.mapping_lock);
     if ( hd->arch.root_table )
     {
@@ -566,7 +563,6 @@ static const struct iommu_ops __initconstrel _iommu_ops = {
     .setup_hpet_msi = amd_setup_hpet_msi,
     .suspend = amd_iommu_suspend,
     .resume = amd_iommu_resume,
-    .share_p2m = amd_iommu_share_p2m,
     .crash_shutdown = amd_iommu_crash_shutdown,
     .dump_p2m_table = amd_dump_p2m_table,
 };
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index e0d5d23978..b832f564a7 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -66,9 +66,6 @@ int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
                                              unsigned int flush_flags);
 int __must_check amd_iommu_flush_iotlb_all(struct domain *d);
 
-/* Share p2m table with iommu */
-void amd_iommu_share_p2m(struct domain *d);
-
 /* device table functions */
 int get_dma_requestor_id(uint16_t seg, uint16_t bdf);
 void amd_iommu_set_intremap_table(struct amd_iommu_dte *dte,
-- 
2.17.1
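
For readers wondering what the deleted amd_iommu_share_p2m() used to do:
it pointed the IOMMU's root table at the host p2m's top-level page, so
that CPU and device walks consumed the same tree. A simplified sketch
(types reduced, names hypothetical):

struct domain_iommu_sketch {
    void *root_table;
    int   paging_mode;
};

/* Sketch of the removed sharing hook: adopt the p2m root as the IOMMU
 * root and fix up the paging mode; the IOMMU's own tables are freed. */
static void share_p2m_sketch(struct domain_iommu_sketch *hd, void *p2m_root)
{
    if ( hd->root_table != p2m_root )
    {
        /* free_amd_iommu_pgtable(hd->root_table) in the real code */
        hd->root_table  = p2m_root;
        hd->paging_mode = 4;   /* 4-level walk when sharing the p2m */
    }
}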



* Re: [Xen-devel] [PATCH v3 1/2] x86/mm: Clean IOMMU flags from p2m-pt code
From: Jan Beulich @ 2019-07-24 15:58 UTC (permalink / raw)
  To: Alexandru Stefan ISAILA
  Cc: wl, george.dunlap, andrew.cooper3, suravee.suthikulpanit,
	xen-devel, brian.woods, roger.pau

On 16.07.2019 14:01, Alexandru Stefan ISAILA wrote:
> At this moment, IOMMU page-table sharing is disabled by commit [1].
> 
> This patch removes the IOMMU HAP page-table sharing support, as it will
> not be used in the future. By doing this, the IOMMU bits used in
> pte[52:58] become free for other uses.
> 
> [1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555
> 
> Suggested-by: George Dunlap <george.dunlap@citrix.com>
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>

* Re: [Xen-devel] [PATCH v3 2/2] passthrough/amd: Clean iommu_hap_pt_share enabled code
From: Jan Beulich @ 2019-07-24 16:02 UTC (permalink / raw)
  To: Alexandru Stefan ISAILA
  Cc: wl, george.dunlap, andrew.cooper3, suravee.suthikulpanit,
	xen-devel, brian.woods, roger.pau

On 16.07.2019 14:01, Alexandru Stefan ISAILA wrote:
> At this moment, IOMMU page-table sharing is disabled by commit [1].
> 
> This patch removes the unreachable code guarded by iommu_hap_pt_share.
> 
> [1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555
> 
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>

* Re: [Xen-devel] [PATCH v3 1/2] x86/mm: Clean IOMMU flags from p2m-pt code
From: Woods, Brian @ 2019-07-24 19:44 UTC (permalink / raw)
  To: Alexandru Stefan ISAILA
  Cc: Suthikulpanit, Suravee, wl, george.dunlap, andrew.cooper3,
	jbeulich, xen-devel, Woods, Brian, roger.pau

On Tue, Jul 16, 2019 at 12:01:11PM +0000, Alexandru Stefan ISAILA wrote:
> At this moment, IOMMU page-table sharing is disabled by commit [1].
> 
> This patch removes the IOMMU HAP page-table sharing support, as it will
> not be used in the future. By doing this, the IOMMU bits used in
> pte[52:58] become free for other uses.
> 
> [1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555
> 
> Suggested-by: George Dunlap <george.dunlap@citrix.com>
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

Acked-by: Brian Woods <brian.woods@amd.com>


-- 
Brian Woods


* Re: [Xen-devel] [PATCH v3 2/2] passthrough/amd: Clean iommu_hap_pt_share enabled code
From: Woods, Brian @ 2019-07-24 19:44 UTC (permalink / raw)
  To: Alexandru Stefan ISAILA
  Cc: Suthikulpanit, Suravee, wl, george.dunlap, andrew.cooper3,
	jbeulich, xen-devel, Woods, Brian, roger.pau

On Tue, Jul 16, 2019 at 12:01:15PM +0000, Alexandru Stefan ISAILA wrote:
> At this moment, IOMMU page-table sharing is disabled by commit [1].
> 
> This patch removes the unreachable code guarded by iommu_hap_pt_share.
> 
> [1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555
> 
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

Acked-by: Brian Woods <brian.woods@amd.com>


-- 
Brian Woods


* Re: [Xen-devel] [PATCH v3 1/2] x86/mm: Clean IOMMU flags from p2m-pt code
From: Alexandru Stefan ISAILA @ 2019-08-01  8:42 UTC (permalink / raw)
  To: xen-devel, george.dunlap
  Cc: jbeulich, wl, andrew.cooper3, suravee.suthikulpanit, brian.woods,
	roger.pau

Hi George,

Did you get a chance to look at this clean-up?

Thanks,
Alex

On 16.07.2019 15:01, Alexandru Stefan ISAILA wrote:
> At this moment, IOMMU page-table sharing is disabled by commit [1].
> 
> This patch removes the IOMMU HAP page-table sharing support, as it will
> not be used in the future. By doing this, the IOMMU bits used in
> pte[52:58] become free for other uses.
> 
> [1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555
> 
> Suggested-by: George Dunlap <george.dunlap@citrix.com>
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
> 

* Re: [Xen-devel] [PATCH v3 1/2] x86/mm: Clean IOMMU flags from p2m-pt code
From: George Dunlap @ 2019-08-14 14:31 UTC (permalink / raw)
  To: Alexandru Stefan ISAILA, xen-devel
  Cc: jbeulich, wl, george.dunlap, andrew.cooper3,
	suravee.suthikulpanit, brian.woods, roger.pau

On 7/16/19 1:01 PM, Alexandru Stefan ISAILA wrote:
> At this moment, IOMMU page-table sharing is disabled by commit [1].
> 
> This patch removes the IOMMU HAP page-table sharing support, as it will
> not be used in the future. By doing this, the IOMMU bits used in
> pte[52:58] become free for other uses.
> 
> [1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555
> 
> Suggested-by: George Dunlap <george.dunlap@citrix.com>
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

Sorry for the delay:

Reviewed-by: George Dunlap <george.dunlap@citrix.com>


