From: Paul Durrant <paul@xen.org>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Paul Durrant <pdurrant@amazon.com>,
Kevin Tian <kevin.tian@intel.com>,
Jan Beulich <jbeulich@suse.com>, Paul Durrant <paul@xen.org>
Subject: [PATCH v2 10/10] iommu: stop calling IOMMU page tables 'p2m tables'
Date: Thu, 30 Jul 2020 15:29:26 +0100 [thread overview]
Message-ID: <20200730142926.6051-11-paul@xen.org> (raw)
In-Reply-To: <20200730142926.6051-1-paul@xen.org>
From: Paul Durrant <pdurrant@amazon.com>
It's confusing and not consistent with the terminology introduced with 'dfn_t'.
Just call them IOMMU page tables.
Also remove a pointless check of the 'acpi_drhd_units' list in
vtd_dump_page_tables(). If the list is empty then IOMMU mappings would
not have been enabled for the domain in the first place.
NOTE: All calls to printk() have also been removed from
iommu_dump_page_tables(); the implementation specific code is now
responsible for all output.
The check for the global 'iommu_enabled' has also been replaced by an
ASSERT since iommu_dump_page_tables() is not registered as a key handler
unless IOMMU mappings are enabled.
Signed-off-by: Paul Durrant <pdurrant@amazon.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Paul Durrant <paul@xen.org>
Cc: Kevin Tian <kevin.tian@intel.com>
v2:
- Moved all output into implementation specific code
---
xen/drivers/passthrough/amd/pci_amd_iommu.c | 16 ++++++-------
xen/drivers/passthrough/iommu.c | 21 ++++-------------
xen/drivers/passthrough/vtd/iommu.c | 26 +++++++++++----------
xen/include/xen/iommu.h | 2 +-
4 files changed, 28 insertions(+), 37 deletions(-)
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index d79668f948..b3e95cf18e 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -491,8 +491,8 @@ static int amd_iommu_group_id(u16 seg, u8 bus, u8 devfn)
#include <asm/io_apic.h>
-static void amd_dump_p2m_table_level(struct page_info* pg, int level,
- paddr_t gpa, int indent)
+static void amd_dump_page_table_level(struct page_info* pg, int level,
+ paddr_t gpa, int indent)
{
paddr_t address;
struct amd_iommu_pte *table_vaddr;
@@ -529,7 +529,7 @@ static void amd_dump_p2m_table_level(struct page_info* pg, int level,
address = gpa + amd_offset_level_address(index, level);
if ( pde->next_level >= 1 )
- amd_dump_p2m_table_level(
+ amd_dump_page_table_level(
mfn_to_page(_mfn(pde->mfn)), pde->next_level,
address, indent + 1);
else
@@ -542,16 +542,16 @@ static void amd_dump_p2m_table_level(struct page_info* pg, int level,
unmap_domain_page(table_vaddr);
}
-static void amd_dump_p2m_table(struct domain *d)
+static void amd_dump_page_tables(struct domain *d)
{
const struct domain_iommu *hd = dom_iommu(d);
if ( !hd->arch.amd.root_table )
return;
- printk("p2m table has %d levels\n", hd->arch.amd.paging_mode);
- amd_dump_p2m_table_level(hd->arch.amd.root_table,
- hd->arch.amd.paging_mode, 0, 0);
+ printk("AMD IOMMU table has %d levels\n", hd->arch.amd.paging_mode);
+ amd_dump_page_table_level(hd->arch.amd.root_table,
+ hd->arch.amd.paging_mode, 0, 0);
}
static const struct iommu_ops __initconstrel _iommu_ops = {
@@ -578,7 +578,7 @@ static const struct iommu_ops __initconstrel _iommu_ops = {
.suspend = amd_iommu_suspend,
.resume = amd_iommu_resume,
.crash_shutdown = amd_iommu_crash_shutdown,
- .dump_p2m_table = amd_dump_p2m_table,
+ .dump_page_tables = amd_dump_page_tables,
};
static const struct iommu_init_ops __initconstrel _iommu_init_ops = {
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 7464f10d1c..0f468379e1 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -22,7 +22,7 @@
#include <xen/keyhandler.h>
#include <xsm/xsm.h>
-static void iommu_dump_p2m_table(unsigned char key);
+static void iommu_dump_page_tables(unsigned char key);
unsigned int __read_mostly iommu_dev_iotlb_timeout = 1000;
integer_param("iommu_dev_iotlb_timeout", iommu_dev_iotlb_timeout);
@@ -212,7 +212,7 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
if ( !is_iommu_enabled(d) )
return;
- register_keyhandler('o', &iommu_dump_p2m_table, "dump iommu p2m table", 0);
+ register_keyhandler('o', &iommu_dump_page_tables, "dump iommu page tables", 0);
hd->platform_ops->hwdom_init(d);
}
@@ -533,16 +533,12 @@ bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature)
return is_iommu_enabled(d) && test_bit(feature, dom_iommu(d)->features);
}
-static void iommu_dump_p2m_table(unsigned char key)
+static void iommu_dump_page_tables(unsigned char key)
{
struct domain *d;
const struct iommu_ops *ops;
- if ( !iommu_enabled )
- {
- printk("IOMMU not enabled!\n");
- return;
- }
+ ASSERT(iommu_enabled);
ops = iommu_get_ops();
@@ -553,14 +549,7 @@ static void iommu_dump_p2m_table(unsigned char key)
if ( is_hardware_domain(d) || !is_iommu_enabled(d) )
continue;
- if ( iommu_use_hap_pt(d) )
- {
- printk("\ndomain%d IOMMU p2m table shared with MMU: \n", d->domain_id);
- continue;
- }
-
- printk("\ndomain%d IOMMU p2m table: \n", d->domain_id);
- ops->dump_p2m_table(d);
+ ops->dump_page_tables(d);
}
rcu_read_unlock(&domlist_read_lock);
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index a532d9e88c..f8da4fe0e7 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -2582,8 +2582,8 @@ static void vtd_resume(void)
}
}
-static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
- int indent)
+static void vtd_dump_page_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
+ int indent)
{
paddr_t address;
int i;
@@ -2612,8 +2612,8 @@ static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
address = gpa + offset_level_address(i, level);
if ( next_level >= 1 )
- vtd_dump_p2m_table_level(dma_pte_addr(*pte), next_level,
- address, indent + 1);
+ vtd_dump_page_table_level(dma_pte_addr(*pte), next_level,
+ address, indent + 1);
else
printk("%*sdfn: %08lx mfn: %08lx\n",
indent, "",
@@ -2624,17 +2624,19 @@ static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
unmap_vtd_domain_page(pt_vaddr);
}
-static void vtd_dump_p2m_table(struct domain *d)
+static void vtd_dump_page_tables(struct domain *d)
{
- const struct domain_iommu *hd;
+ const struct domain_iommu *hd = dom_iommu(d);
- if ( list_empty(&acpi_drhd_units) )
+ if ( iommu_use_hap_pt(d) )
+ {
+ printk("VT-D sharing EPT table\n");
return;
+ }
- hd = dom_iommu(d);
- printk("p2m table has %d levels\n", agaw_to_level(hd->arch.vtd.agaw));
- vtd_dump_p2m_table_level(hd->arch.vtd.pgd_maddr,
- agaw_to_level(hd->arch.vtd.agaw), 0, 0);
+ printk("VT-D table has %d levels\n", agaw_to_level(hd->arch.vtd.agaw));
+ vtd_dump_page_table_level(hd->arch.vtd.pgd_maddr,
+ agaw_to_level(hd->arch.vtd.agaw), 0, 0);
}
static int __init intel_iommu_quarantine_init(struct domain *d)
@@ -2734,7 +2736,7 @@ static struct iommu_ops __initdata vtd_ops = {
.iotlb_flush = iommu_flush_iotlb_pages,
.iotlb_flush_all = iommu_flush_iotlb_all,
.get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
- .dump_p2m_table = vtd_dump_p2m_table,
+ .dump_page_tables = vtd_dump_page_tables,
};
const struct iommu_init_ops __initconstrel intel_iommu_init_ops = {
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 1f25d2082f..23e884f54b 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -277,7 +277,7 @@ struct iommu_ops {
unsigned int flush_flags);
int __must_check (*iotlb_flush_all)(struct domain *d);
int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
- void (*dump_p2m_table)(struct domain *d);
+ void (*dump_page_tables)(struct domain *d);
#ifdef CONFIG_HAS_DEVICE_TREE
/*
--
2.20.1
prev parent reply other threads:[~2020-07-30 14:40 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-07-30 14:29 [PATCH v2 00/10] IOMMU cleanup Paul Durrant
2020-07-30 14:29 ` [PATCH v2 01/10] x86/iommu: re-arrange arch_iommu to separate common fields Paul Durrant
2020-07-30 14:29 ` [PATCH v2 02/10] x86/iommu: add common page-table allocator Paul Durrant
2020-07-30 14:29 ` [PATCH v2 03/10] x86/iommu: convert VT-d code to use new page table allocator Paul Durrant
2020-07-30 14:29 ` [PATCH v2 04/10] x86/iommu: convert AMD IOMMU " Paul Durrant
2020-07-30 14:29 ` [PATCH v2 05/10] iommu: remove unused iommu_ops method and tasklet Paul Durrant
2020-07-30 14:29 ` [PATCH v2 06/10] iommu: flush I/O TLB if iommu_map() or iommu_unmap() fail Paul Durrant
2020-07-30 14:29 ` [PATCH v2 07/10] iommu: make map, unmap and flush all take both an order and a count Paul Durrant
2020-07-30 14:29 ` [PATCH v2 08/10] remove remaining uses of iommu_legacy_map/unmap Paul Durrant
2020-07-31 9:21 ` Durrant, Paul
2020-07-30 14:29 ` [PATCH v2 09/10] iommu: remove the share_p2m operation Paul Durrant
2020-07-30 14:29 ` Paul Durrant [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200730142926.6051-11-paul@xen.org \
--to=paul@xen.org \
--cc=andrew.cooper3@citrix.com \
--cc=jbeulich@suse.com \
--cc=kevin.tian@intel.com \
--cc=pdurrant@amazon.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).