From: Paul Durrant <paul@xen.org>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Paul Durrant <pdurrant@amazon.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v4 04/14] x86/iommu: convert AMD IOMMU code to use new page table allocator
Date: Tue,  4 Aug 2020 14:41:59 +0100	[thread overview]
Message-ID: <20200804134209.8717-5-paul@xen.org> (raw)
In-Reply-To: <20200804134209.8717-1-paul@xen.org>

From: Paul Durrant <pdurrant@amazon.com>

This patch converts the AMD IOMMU code to use the new page table allocator
function. This allows all the freeing code to be removed (since it is now
handled by the general x86 code), which reduces TLB and cache thrashing as
well as shortening the code.
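
For reference, a minimal sketch of the shape of the common allocator added
earlier in this series ("x86/iommu: add common page-table allocator") that
this patch switches to. The per-domain list/lock field names used here
(hd->arch.pgtables.*) are illustrative assumptions, not copied from that
patch:

    /*
     * Sketch only: allocate a zeroed page and track it per-domain so that
     * the generic x86 code can free all IOMMU page tables in one pass.
     */
    struct page_info *iommu_alloc_pgtable(struct domain *d)
    {
        struct domain_iommu *hd = dom_iommu(d);
        struct page_info *pg = alloc_domheap_page(NULL, 0);

        if ( !pg )
            return NULL;

        clear_domain_page(page_to_mfn(pg));

        /* Record the page so the common tear-down path can free it later. */
        spin_lock(&hd->arch.pgtables.lock);
        page_list_add(pg, &hd->arch.pgtables.list);
        spin_unlock(&hd->arch.pgtables.lock);

        return pg;
    }

This is why amd_iommu_domain_destroy() below can simply clear the root table
pointer: the pages themselves are reclaimed by the common x86 tear-down code
rather than by a per-driver walk of the tables.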

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
Cc: Andrew Cooper <andrew.cooper3@citrix.com>

v2:
  - New in v2 (split from "add common page-table allocator")
---
 xen/drivers/passthrough/amd/iommu.h         | 18 +----
 xen/drivers/passthrough/amd/iommu_map.c     | 10 +--
 xen/drivers/passthrough/amd/pci_amd_iommu.c | 75 +++------------------
 3 files changed, 16 insertions(+), 87 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu.h b/xen/drivers/passthrough/amd/iommu.h
index 3489c2a015..e2d174f3b4 100644
--- a/xen/drivers/passthrough/amd/iommu.h
+++ b/xen/drivers/passthrough/amd/iommu.h
@@ -226,7 +226,7 @@ int __must_check amd_iommu_map_page(struct domain *d, dfn_t dfn,
                                     unsigned int *flush_flags);
 int __must_check amd_iommu_unmap_page(struct domain *d, dfn_t dfn,
                                       unsigned int *flush_flags);
-int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
+int __must_check amd_iommu_alloc_root(struct domain *d);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
                                        paddr_t phys_addr, unsigned long size,
                                        int iw, int ir);
@@ -356,22 +356,6 @@ static inline int amd_iommu_get_paging_mode(unsigned long max_frames)
     return level;
 }
 
-static inline struct page_info *alloc_amd_iommu_pgtable(void)
-{
-    struct page_info *pg = alloc_domheap_page(NULL, 0);
-
-    if ( pg )
-        clear_domain_page(page_to_mfn(pg));
-
-    return pg;
-}
-
-static inline void free_amd_iommu_pgtable(struct page_info *pg)
-{
-    if ( pg )
-        free_domheap_page(pg);
-}
-
 static inline void *__alloc_amd_iommu_tables(unsigned int order)
 {
     return alloc_xenheap_pages(order, 0);
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 47b4472e8a..54b991294a 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -217,7 +217,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
             mfn = next_table_mfn;
 
             /* allocate lower level page table */
-            table = alloc_amd_iommu_pgtable();
+            table = iommu_alloc_pgtable(d);
             if ( table == NULL )
             {
                 AMD_IOMMU_DEBUG("Cannot allocate I/O page table\n");
@@ -248,7 +248,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
 
             if ( next_table_mfn == 0 )
             {
-                table = alloc_amd_iommu_pgtable();
+                table = iommu_alloc_pgtable(d);
                 if ( table == NULL )
                 {
                     AMD_IOMMU_DEBUG("Cannot allocate I/O page table\n");
@@ -286,7 +286,7 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
 
     spin_lock(&hd->arch.mapping_lock);
 
-    rc = amd_iommu_alloc_root(hd);
+    rc = amd_iommu_alloc_root(d);
     if ( rc )
     {
         spin_unlock(&hd->arch.mapping_lock);
@@ -458,7 +458,7 @@ int __init amd_iommu_quarantine_init(struct domain *d)
 
     spin_lock(&hd->arch.mapping_lock);
 
-    hd->arch.amd.root_table = alloc_amd_iommu_pgtable();
+    hd->arch.amd.root_table = iommu_alloc_pgtable(d);
     if ( !hd->arch.amd.root_table )
         goto out;
 
@@ -473,7 +473,7 @@ int __init amd_iommu_quarantine_init(struct domain *d)
          * page table pages, and the resulting allocations are always
          * zeroed.
          */
-        pg = alloc_amd_iommu_pgtable();
+        pg = iommu_alloc_pgtable(d);
         if ( !pg )
             break;
 
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 09a05f9d75..3390c22cf3 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -205,11 +205,13 @@ static int iov_enable_xt(void)
     return 0;
 }
 
-int amd_iommu_alloc_root(struct domain_iommu *hd)
+int amd_iommu_alloc_root(struct domain *d)
 {
+    struct domain_iommu *hd = dom_iommu(d);
+
     if ( unlikely(!hd->arch.amd.root_table) )
     {
-        hd->arch.amd.root_table = alloc_amd_iommu_pgtable();
+        hd->arch.amd.root_table = iommu_alloc_pgtable(d);
         if ( !hd->arch.amd.root_table )
             return -ENOMEM;
     }
@@ -217,12 +219,13 @@ int amd_iommu_alloc_root(struct domain_iommu *hd)
     return 0;
 }
 
-static int __must_check allocate_domain_resources(struct domain_iommu *hd)
+static int __must_check allocate_domain_resources(struct domain *d)
 {
+    struct domain_iommu *hd = dom_iommu(d);
     int rc;
 
     spin_lock(&hd->arch.mapping_lock);
-    rc = amd_iommu_alloc_root(hd);
+    rc = amd_iommu_alloc_root(d);
     spin_unlock(&hd->arch.mapping_lock);
 
     return rc;
@@ -254,7 +257,7 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
 {
     const struct amd_iommu *iommu;
 
-    if ( allocate_domain_resources(dom_iommu(d)) )
+    if ( allocate_domain_resources(d) )
         BUG();
 
     for_each_amd_iommu ( iommu )
@@ -323,7 +326,6 @@ static int reassign_device(struct domain *source, struct domain *target,
 {
     struct amd_iommu *iommu;
     int bdf, rc;
-    struct domain_iommu *t = dom_iommu(target);
 
     bdf = PCI_BDF2(pdev->bus, pdev->devfn);
     iommu = find_iommu_for_device(pdev->seg, bdf);
@@ -344,7 +346,7 @@ static int reassign_device(struct domain *source, struct domain *target,
         pdev->domain = target;
     }
 
-    rc = allocate_domain_resources(t);
+    rc = allocate_domain_resources(target);
     if ( rc )
         return rc;
 
@@ -376,65 +378,9 @@ static int amd_iommu_assign_device(struct domain *d, u8 devfn,
     return reassign_device(pdev->domain, d, devfn, pdev);
 }
 
-static void deallocate_next_page_table(struct page_info *pg, int level)
-{
-    PFN_ORDER(pg) = level;
-    spin_lock(&iommu_pt_cleanup_lock);
-    page_list_add_tail(pg, &iommu_pt_cleanup_list);
-    spin_unlock(&iommu_pt_cleanup_lock);
-}
-
-static void deallocate_page_table(struct page_info *pg)
-{
-    struct amd_iommu_pte *table_vaddr;
-    unsigned int index, level = PFN_ORDER(pg);
-
-    PFN_ORDER(pg) = 0;
-
-    if ( level <= 1 )
-    {
-        free_amd_iommu_pgtable(pg);
-        return;
-    }
-
-    table_vaddr = __map_domain_page(pg);
-
-    for ( index = 0; index < PTE_PER_TABLE_SIZE; index++ )
-    {
-        struct amd_iommu_pte *pde = &table_vaddr[index];
-
-        if ( pde->mfn && pde->next_level && pde->pr )
-        {
-            /* We do not support skip levels yet */
-            ASSERT(pde->next_level == level - 1);
-            deallocate_next_page_table(mfn_to_page(_mfn(pde->mfn)),
-                                       pde->next_level);
-        }
-    }
-
-    unmap_domain_page(table_vaddr);
-    free_amd_iommu_pgtable(pg);
-}
-
-static void deallocate_iommu_page_tables(struct domain *d)
-{
-    struct domain_iommu *hd = dom_iommu(d);
-
-    spin_lock(&hd->arch.mapping_lock);
-    if ( hd->arch.amd.root_table )
-    {
-        deallocate_next_page_table(hd->arch.amd.root_table,
-                                   hd->arch.amd.paging_mode);
-        hd->arch.amd.root_table = NULL;
-    }
-    spin_unlock(&hd->arch.mapping_lock);
-}
-
-
 static void amd_iommu_domain_destroy(struct domain *d)
 {
-    deallocate_iommu_page_tables(d);
-    amd_iommu_flush_all_pages(d);
+    dom_iommu(d)->arch.amd.root_table = NULL;
 }
 
 static int amd_iommu_add_device(u8 devfn, struct pci_dev *pdev)
@@ -620,7 +566,6 @@ static const struct iommu_ops __initconstrel _iommu_ops = {
     .unmap_page = amd_iommu_unmap_page,
     .iotlb_flush = amd_iommu_flush_iotlb_pages,
     .iotlb_flush_all = amd_iommu_flush_iotlb_all,
-    .free_page_table = deallocate_page_table,
     .reassign_device = reassign_device,
     .get_device_group_id = amd_iommu_group_id,
     .enable_x2apic = iov_enable_xt,
-- 
2.20.1



Thread overview: 43+ messages
2020-08-04 13:41 [PATCH v4 00/14] IOMMU cleanup Paul Durrant
2020-08-04 13:41 ` [PATCH v4 01/14] x86/iommu: re-arrange arch_iommu to separate common fields Paul Durrant
2020-08-14  6:14   ` Tian, Kevin
2020-08-04 13:41 ` [PATCH v4 02/14] x86/iommu: add common page-table allocator Paul Durrant
2020-08-05 15:39   ` Jan Beulich
2020-08-04 13:41 ` [PATCH v4 03/14] x86/iommu: convert VT-d code to use new page table allocator Paul Durrant
2020-08-14  6:41   ` Tian, Kevin
2020-08-14  7:16     ` Durrant, Paul
2020-08-04 13:41 ` Paul Durrant [this message]
2020-08-04 13:42 ` [PATCH v4 05/14] iommu: remove unused iommu_ops method and tasklet Paul Durrant
2020-08-04 13:42 ` [PATCH v4 06/14] iommu: flush I/O TLB if iommu_map() or iommu_unmap() fail Paul Durrant
2020-08-05 16:06   ` Jan Beulich
2020-08-05 16:18     ` Paul Durrant
2020-08-06 11:41   ` Jan Beulich
2020-08-14  6:53   ` Tian, Kevin
2020-08-14  7:19     ` Durrant, Paul
2020-08-04 13:42 ` [PATCH v4 07/14] iommu: make map, unmap and flush all take both an order and a count Paul Durrant
2020-08-06  9:57   ` Jan Beulich
2020-08-11 11:00     ` Durrant, Paul
2020-08-14  6:57     ` Tian, Kevin
2020-08-04 13:42 ` [PATCH v4 08/14] remove remaining uses of iommu_legacy_map/unmap Paul Durrant
2020-08-06 10:28   ` Jan Beulich
2020-08-12  9:36     ` [EXTERNAL] " Paul Durrant
2020-08-04 13:42 ` [PATCH v4 09/14] common/grant_table: batch flush I/O TLB Paul Durrant
2020-08-06 11:49   ` Jan Beulich
2020-08-04 13:42 ` [PATCH v4 10/14] iommu: remove the share_p2m operation Paul Durrant
2020-08-06 12:18   ` Jan Beulich
2020-08-14  7:04   ` Tian, Kevin
2020-08-04 13:42 ` [PATCH v4 11/14] iommu: stop calling IOMMU page tables 'p2m tables' Paul Durrant
2020-08-06 12:23   ` Jan Beulich
2020-08-14  7:12   ` Tian, Kevin
2020-08-04 13:42 ` [PATCH v4 12/14] vtd: use a bit field for root_entry Paul Durrant
2020-08-06 12:34   ` Jan Beulich
2020-08-12 13:13     ` Durrant, Paul
2020-08-18  8:27       ` Jan Beulich
2020-08-14  7:17   ` Tian, Kevin
2020-08-04 13:42 ` [PATCH v4 13/14] vtd: use a bit field for context_entry Paul Durrant
2020-08-06 12:46   ` Jan Beulich
2020-08-12 13:47     ` Paul Durrant
2020-08-14  7:19   ` Tian, Kevin
2020-08-04 13:42 ` [PATCH v4 14/14] vtd: use a bit field for dma_pte Paul Durrant
2020-08-06 12:53   ` Jan Beulich
2020-08-12 13:49     ` Paul Durrant
