From: Paul Durrant <paul@xen.org>
To: xen-devel@lists.xenproject.org
Cc: "Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Paul Durrant" <pdurrant@amazon.com>, "Wei Liu" <wl@xen.org>,
	"Jan Beulich" <jbeulich@suse.com>,
	"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [PATCH v4 02/14] x86/iommu: add common page-table allocator
Date: Tue,  4 Aug 2020 14:41:57 +0100
Message-ID: <20200804134209.8717-3-paul@xen.org>
In-Reply-To: <20200804134209.8717-1-paul@xen.org>

From: Paul Durrant <pdurrant@amazon.com>

Instead of having separate page table allocation functions in the VT-d and
AMD IOMMU code, use a single common allocation function in the general x86
code.

This patch adds a new allocation function, iommu_alloc_pgtable(), for this
purpose. Pages allocated by this function are added to a per-domain list;
the pages on that list are then freed by iommu_free_pgtables(), which is
called by domain_relinquish_resources() after PCI devices have been
de-assigned.
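
For illustration only, a driver-side helper converted to the new allocator
might look roughly like the sketch below. The helper name and the use of
page_to_maddr() are assumptions made for this sketch; they are not part of
the patch.

  /*
   * Illustrative sketch only: obtain a new, zeroed page-table page via
   * the common allocator. The page is already cleared and, if needed,
   * cache-synced by iommu_alloc_pgtable(), and it has been queued on the
   * per-domain pgtables list, so the driver needs no bespoke free path:
   * iommu_free_pgtables() reclaims everything at domain teardown.
   */
  static paddr_t example_alloc_pgtable_maddr(struct domain *d)
  {
      struct page_info *pg = iommu_alloc_pgtable(d);

      return pg ? page_to_maddr(pg) : 0;
  }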

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Wei Liu <wl@xen.org>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>

v4:
 - Remove space between '*' and '__must_check'
 - Reduce frequency of pre-empt check during table freeing
 - Fix parentheses formatting

v2:
 - This is split out from a larger patch of the same name in v1
---
 xen/arch/x86/domain.c               |  9 ++++-
 xen/drivers/passthrough/x86/iommu.c | 51 +++++++++++++++++++++++++++++
 xen/include/asm-x86/iommu.h         |  7 ++++
 3 files changed, 66 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index f8084dc9e3..d1ecc7b83b 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2153,7 +2153,8 @@ int domain_relinquish_resources(struct domain *d)
         d->arch.rel_priv = PROG_ ## x; /* Fallthrough */ case PROG_ ## x
 
         enum {
-            PROG_paging = 1,
+            PROG_iommu_pagetables = 1,
+            PROG_paging,
             PROG_vcpu_pagetables,
             PROG_shared,
             PROG_xen,
@@ -2168,6 +2169,12 @@ int domain_relinquish_resources(struct domain *d)
         if ( ret )
             return ret;
 
+    PROGRESS(iommu_pagetables):
+
+        ret = iommu_free_pgtables(d);
+        if ( ret )
+            return ret;
+
     PROGRESS(paging):
 
         /* Tear down paging-assistance stuff. */
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index a12109a1de..aea07e47c4 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -140,6 +140,9 @@ int arch_iommu_domain_init(struct domain *d)
 
     spin_lock_init(&hd->arch.mapping_lock);
 
+    INIT_PAGE_LIST_HEAD(&hd->arch.pgtables.list);
+    spin_lock_init(&hd->arch.pgtables.lock);
+
     return 0;
 }
 
@@ -257,6 +260,54 @@ void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
         return;
 }
 
+int iommu_free_pgtables(struct domain *d)
+{
+    struct domain_iommu *hd = dom_iommu(d);
+    struct page_info *pg;
+    unsigned int done = 0;
+
+    while ( (pg = page_list_remove_head(&hd->arch.pgtables.list)) )
+    {
+        free_domheap_page(pg);
+
+        if ( !(++done & 0xff) && general_preempt_check() )
+            return -ERESTART;
+    }
+
+    return 0;
+}
+
+struct page_info *iommu_alloc_pgtable(struct domain *d)
+{
+    struct domain_iommu *hd = dom_iommu(d);
+    unsigned int memflags = 0;
+    struct page_info *pg;
+    void *p;
+
+#ifdef CONFIG_NUMA
+    if ( hd->node != NUMA_NO_NODE )
+        memflags = MEMF_node(hd->node);
+#endif
+
+    pg = alloc_domheap_page(NULL, memflags);
+    if ( !pg )
+        return NULL;
+
+    p = __map_domain_page(pg);
+    clear_page(p);
+
+    if ( hd->platform_ops->sync_cache )
+        iommu_vcall(hd->platform_ops, sync_cache, p, PAGE_SIZE);
+
+    unmap_domain_page(p);
+
+    spin_lock(&hd->arch.pgtables.lock);
+    page_list_add(pg, &hd->arch.pgtables.list);
+    spin_unlock(&hd->arch.pgtables.lock);
+
+    return pg;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
index 8ce97c981f..970eb06ffa 100644
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -46,6 +46,10 @@ typedef uint64_t daddr_t;
 struct arch_iommu
 {
     spinlock_t mapping_lock; /* io page table lock */
+    struct {
+        struct page_list_head list;
+        spinlock_t lock;
+    } pgtables;
 
     union {
         /* Intel VT-d */
@@ -131,6 +135,9 @@ int pi_update_irte(const struct pi_desc *pi_desc, const struct pirq *pirq,
         iommu_vcall(ops, sync_cache, addr, size);       \
 })
 
+int __must_check iommu_free_pgtables(struct domain *d);
+struct page_info *__must_check iommu_alloc_pgtable(struct domain *d);
+
 #endif /* !__ARCH_X86_IOMMU_H__ */
 /*
  * Local variables:
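
Note on the reclaim path above: iommu_free_pgtables() bounds the work done
per invocation by consulting general_preempt_check() only once every 256
freed pages (the '!(++done & 0xff)' test) and returning -ERESTART, so that
domain_relinquish_resources() can be re-invoked later and resume from the
PROG_iommu_pagetables phase. A minimal restatement of that idiom, using
placeholder helpers that are not part of this patch:

  unsigned int done = 0;

  while ( more_work_to_do() )          /* placeholder condition */
  {
      free_one_page();                 /* placeholder helper */

      /* Amortise the preemption check over batches of 256 iterations. */
      if ( !(++done & 0xff) && general_preempt_check() )
          return -ERESTART;            /* caller re-invokes us later */
  }
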
-- 
2.20.1



