iommu.lists.linux-foundation.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api
@ 2020-09-12  3:21 Lu Baolu
  2020-09-12  3:21 ` [PATCH v3 1/6] iommu: Handle freelists when using deferred flushing in iommu drivers Lu Baolu
                   ` (7 more replies)
  0 siblings, 8 replies; 20+ messages in thread
From: Lu Baolu @ 2020-09-12  3:21 UTC (permalink / raw)
  To: Joerg Roedel, Tom Murphy, David Woodhouse, Christoph Hellwig
  Cc: Tvrtko Ursulin, Ashok Raj, Intel-gfx, linux-kernel, iommu

Tom Murphy has done almost all of the work. His latest patch series was
posted here.

https://lore.kernel.org/linux-iommu/20200903201839.7327-1-murphyt7@tcd.ie/

Thanks a lot!

This series is a follow-up with below changes:

1. Add a quirk for the i915 driver issue described in Tom's cover
letter.
2. Fix several bugs in patch "iommu: Allow the dma-iommu api to use
bounce buffers" to make the bounce buffer work for untrusted devices.
3. Several cleanups in iommu/vt-d driver after the conversion.

Please review and test.

Best regards,
baolu

Lu Baolu (2):
  iommu: Add quirk for Intel graphic devices in map_sg
  iommu/vt-d: Cleanup after converting to dma-iommu ops

Tom Murphy (4):
  iommu: Handle freelists when using deferred flushing in iommu drivers
  iommu: Add iommu_dma_free_cpu_cached_iovas()
  iommu: Allow the dma-iommu api to use bounce buffers
  iommu/vt-d: Convert intel iommu driver to the iommu ops

 .../admin-guide/kernel-parameters.txt         |   5 -
 drivers/iommu/dma-iommu.c                     | 229 ++++-
 drivers/iommu/intel/Kconfig                   |   1 +
 drivers/iommu/intel/iommu.c                   | 885 +++---------------
 include/linux/dma-iommu.h                     |   8 +
 include/linux/iommu.h                         |   1 +
 6 files changed, 323 insertions(+), 806 deletions(-)

-- 
2.17.1

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH v3 1/6] iommu: Handle freelists when using deferred flushing in iommu drivers
  2020-09-12  3:21 [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api Lu Baolu
@ 2020-09-12  3:21 ` Lu Baolu
  2020-09-12  3:21 ` [PATCH v3 2/6] iommu: Add iommu_dma_free_cpu_cached_iovas() Lu Baolu
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 20+ messages in thread
From: Lu Baolu @ 2020-09-12  3:21 UTC (permalink / raw)
  To: Joerg Roedel, Tom Murphy, David Woodhouse, Christoph Hellwig
  Cc: Tvrtko Ursulin, Ashok Raj, Intel-gfx, linux-kernel, iommu

From: Tom Murphy <murphyt7@tcd.ie>

Allow the iommu_unmap_fast to return newly freed page table pages and
pass the freelist to queue_iova in the dma-iommu ops path.

This is useful for iommu drivers (in this case the intel iommu driver)
which need to wait for the ioTLB to be flushed before newly
freed/unmapped page table pages can be freed. This way we can still batch
ioTLB free operations and handle the freelists.

Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/iommu/dma-iommu.c   | 30 ++++++++++++++------
 drivers/iommu/intel/iommu.c | 55 ++++++++++++++++++++++++-------------
 include/linux/iommu.h       |  1 +
 3 files changed, 59 insertions(+), 27 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 5141d49a046b..82c071b2d5c8 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -50,6 +50,18 @@ struct iommu_dma_cookie {
 	struct iommu_domain		*fq_domain;
 };
 
+static void iommu_dma_entry_dtor(unsigned long data)
+{
+	struct page *freelist = (struct page *)data;
+
+	while (freelist) {
+		unsigned long p = (unsigned long)page_address(freelist);
+
+		freelist = freelist->freelist;
+		free_page(p);
+	}
+}
+
 static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
 {
 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
@@ -344,7 +356,8 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
 			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
 		cookie->fq_domain = domain;
-		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
+		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
+				      iommu_dma_entry_dtor);
 	}
 
 	if (!dev)
@@ -438,7 +451,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 }
 
 static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
-		dma_addr_t iova, size_t size)
+		dma_addr_t iova, size_t size, struct page *freelist)
 {
 	struct iova_domain *iovad = &cookie->iovad;
 
@@ -447,7 +460,8 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 		cookie->msi_iova -= size;
 	else if (cookie->fq_domain)	/* non-strict mode */
 		queue_iova(iovad, iova_pfn(iovad, iova),
-				size >> iova_shift(iovad), 0);
+				size >> iova_shift(iovad),
+				(unsigned long)freelist);
 	else
 		free_iova_fast(iovad, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad));
@@ -472,7 +486,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 
 	if (!cookie->fq_domain)
 		iommu_tlb_sync(domain, &iotlb_gather);
-	iommu_dma_free_iova(cookie, dma_addr, size);
+	iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
@@ -494,7 +508,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 		return DMA_MAPPING_ERROR;
 
 	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
-		iommu_dma_free_iova(cookie, iova, size);
+		iommu_dma_free_iova(cookie, iova, size, NULL);
 		return DMA_MAPPING_ERROR;
 	}
 	return iova + iova_off;
@@ -649,7 +663,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 out_free_sg:
 	sg_free_table(&sgt);
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, size);
+	iommu_dma_free_iova(cookie, iova, size, NULL);
 out_free_pages:
 	__iommu_dma_free_pages(pages, count);
 	return NULL;
@@ -900,7 +914,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	return __finalise_sg(dev, sg, nents, iova);
 
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, iova_len);
+	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
 out_restore_sg:
 	__invalidate_sg(sg, nents);
 	return 0;
@@ -1194,7 +1208,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	return msi_page;
 
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, size);
+	iommu_dma_free_iova(cookie, iova, size, NULL);
 out_free_page:
 	kfree(msi_page);
 	return NULL;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 87b17bac04c2..63ee30c689a7 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1208,17 +1208,17 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
    pages can only be freed after the IOTLB flush has been done. */
 static struct page *domain_unmap(struct dmar_domain *domain,
 				 unsigned long start_pfn,
-				 unsigned long last_pfn)
+				 unsigned long last_pfn,
+				 struct page *freelist)
 {
-	struct page *freelist;
-
 	BUG_ON(!domain_pfn_supported(domain, start_pfn));
 	BUG_ON(!domain_pfn_supported(domain, last_pfn));
 	BUG_ON(start_pfn > last_pfn);
 
 	/* we don't need lock here; nobody else touches the iova range */
 	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
-				       domain->pgd, 0, start_pfn, last_pfn, NULL);
+				       domain->pgd, 0, start_pfn, last_pfn,
+				       freelist);
 
 	/* free pgd */
 	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
@@ -1976,7 +1976,8 @@ static void domain_exit(struct dmar_domain *domain)
 	if (domain->pgd) {
 		struct page *freelist;
 
-		freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+		freelist = domain_unmap(domain, 0,
+					DOMAIN_MAX_PFN(domain->gaw), NULL);
 		dma_free_pagelist(freelist);
 	}
 
@@ -3532,7 +3533,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
 	if (dev_is_pci(dev))
 		pdev = to_pci_dev(dev);
 
-	freelist = domain_unmap(domain, start_pfn, last_pfn);
+	freelist = domain_unmap(domain, start_pfn, last_pfn, NULL);
 	if (intel_iommu_strict || (pdev && pdev->untrusted) ||
 			!has_iova_flush_queue(&domain->iovad)) {
 		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
@@ -4595,7 +4596,8 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 			struct page *freelist;
 
 			freelist = domain_unmap(si_domain,
-						start_vpfn, last_vpfn);
+						start_vpfn, last_vpfn,
+						NULL);
 
 			rcu_read_lock();
 			for_each_active_iommu(iommu, drhd)
@@ -5570,10 +5572,8 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 				struct iommu_iotlb_gather *gather)
 {
 	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-	struct page *freelist = NULL;
 	unsigned long start_pfn, last_pfn;
-	unsigned int npages;
-	int iommu_id, level = 0;
+	int level = 0;
 
 	/* Cope with horrid API which requires us to unmap more than the
 	   size argument if it happens to be a large-page mapping. */
@@ -5585,22 +5585,38 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 	start_pfn = iova >> VTD_PAGE_SHIFT;
 	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
 
-	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
-
-	npages = last_pfn - start_pfn + 1;
-
-	for_each_domain_iommu(iommu_id, dmar_domain)
-		iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
-				      start_pfn, npages, !freelist, 0);
-
-	dma_free_pagelist(freelist);
+	gather->freelist = domain_unmap(dmar_domain, start_pfn,
+					last_pfn, gather->freelist);
 
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
+	iommu_iotlb_gather_add_page(domain, gather, iova, size);
+
 	return size;
 }
 
+static void intel_iommu_tlb_sync(struct iommu_domain *domain,
+				 struct iommu_iotlb_gather *gather)
+{
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+	unsigned long iova_pfn = IOVA_PFN(gather->start);
+	size_t size = gather->end - gather->start;
+	unsigned long start_pfn, last_pfn;
+	unsigned long nrpages;
+	int iommu_id;
+
+	nrpages = aligned_nrpages(gather->start, size);
+	start_pfn = mm_to_dma_pfn(iova_pfn);
+	last_pfn = start_pfn + nrpages - 1;
+
+	for_each_domain_iommu(iommu_id, dmar_domain)
+		iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
+				      start_pfn, nrpages, !gather->freelist, 0);
+
+	dma_free_pagelist(gather->freelist);
+}
+
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 					    dma_addr_t iova)
 {
@@ -6060,6 +6076,7 @@ const struct iommu_ops intel_iommu_ops = {
 	.aux_get_pasid		= intel_iommu_aux_get_pasid,
 	.map			= intel_iommu_map,
 	.unmap			= intel_iommu_unmap,
+	.iotlb_sync		= intel_iommu_tlb_sync,
 	.iova_to_phys		= intel_iommu_iova_to_phys,
 	.probe_device		= intel_iommu_probe_device,
 	.probe_finalize		= intel_iommu_probe_finalize,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index fee209efb756..f0d56360a976 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -180,6 +180,7 @@ struct iommu_iotlb_gather {
 	unsigned long		start;
 	unsigned long		end;
 	size_t			pgsize;
+	struct page		*freelist;
 };
 
 /**
-- 
2.17.1

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH v3 2/6] iommu: Add iommu_dma_free_cpu_cached_iovas()
  2020-09-12  3:21 [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api Lu Baolu
  2020-09-12  3:21 ` [PATCH v3 1/6] iommu: Handle freelists when using deferred flushing in iommu drivers Lu Baolu
@ 2020-09-12  3:21 ` Lu Baolu
  2020-09-12  3:21 ` [PATCH v3 3/6] iommu: Allow the dma-iommu api to use bounce buffers Lu Baolu
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 20+ messages in thread
From: Lu Baolu @ 2020-09-12  3:21 UTC (permalink / raw)
  To: Joerg Roedel, Tom Murphy, David Woodhouse, Christoph Hellwig
  Cc: Tvrtko Ursulin, Ashok Raj, Intel-gfx, linux-kernel, iommu

From: Tom Murphy <murphyt7@tcd.ie>

Add an iommu_dma_free_cpu_cached_iovas() function to allow drivers which
use the dma-iommu ops to free cached CPU iovas.

Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/iommu/dma-iommu.c | 9 +++++++++
 include/linux/dma-iommu.h | 8 ++++++++
 2 files changed, 17 insertions(+)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 82c071b2d5c8..d06411bd5e08 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -50,6 +50,15 @@ struct iommu_dma_cookie {
 	struct iommu_domain		*fq_domain;
 };
 
+void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
+		struct iommu_domain *domain)
+{
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+
+	free_cpu_cached_iovas(cpu, iovad);
+}
+
 static void iommu_dma_entry_dtor(unsigned long data)
 {
 	struct page *freelist = (struct page *)data;
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 2112f21f73d8..706b68d1359b 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -37,6 +37,9 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
 
 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 
+void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
+		struct iommu_domain *domain);
+
 #else /* CONFIG_IOMMU_DMA */
 
 struct iommu_domain;
@@ -78,5 +81,10 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
 {
 }
 
+static inline void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
+		struct iommu_domain *domain)
+{
+}
+
 #endif	/* CONFIG_IOMMU_DMA */
 #endif	/* __DMA_IOMMU_H */
-- 
2.17.1

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH v3 3/6] iommu: Allow the dma-iommu api to use bounce buffers
  2020-09-12  3:21 [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api Lu Baolu
  2020-09-12  3:21 ` [PATCH v3 1/6] iommu: Handle freelists when using deferred flushing in iommu drivers Lu Baolu
  2020-09-12  3:21 ` [PATCH v3 2/6] iommu: Add iommu_dma_free_cpu_cached_iovas() Lu Baolu
@ 2020-09-12  3:21 ` Lu Baolu
  2020-09-12  3:21 ` [PATCH v3 4/6] iommu: Add quirk for Intel graphic devices in map_sg Lu Baolu
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 20+ messages in thread
From: Lu Baolu @ 2020-09-12  3:21 UTC (permalink / raw)
  To: Joerg Roedel, Tom Murphy, David Woodhouse, Christoph Hellwig
  Cc: Tvrtko Ursulin, Ashok Raj, Intel-gfx, linux-kernel, iommu

From: Tom Murphy <murphyt7@tcd.ie>

Allow the dma-iommu api to use bounce buffers for untrusted devices.
This is a copy of the intel bounce buffer code.

Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
Co-developed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/iommu/dma-iommu.c | 163 +++++++++++++++++++++++++++++++++++---
 1 file changed, 150 insertions(+), 13 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d06411bd5e08..1a1da22e5a5e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -21,9 +21,11 @@
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/pci.h>
+#include <linux/swiotlb.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
 #include <linux/crash_dump.h>
+#include <linux/dma-direct.h>
 
 struct iommu_dma_msi_page {
 	struct list_head	list;
@@ -498,6 +500,31 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 	iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
 }
 
+static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
+		size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	phys_addr_t phys;
+
+	phys = iommu_iova_to_phys(domain, dma_addr);
+	if (WARN_ON(!phys))
+		return;
+
+	__iommu_dma_unmap(dev, dma_addr, size);
+
+	if (unlikely(is_swiotlb_buffer(phys)))
+		swiotlb_tbl_unmap_single(dev, phys, size,
+				iova_align(iovad, size), dir, attrs);
+}
+
+static bool dev_is_untrusted(struct device *dev)
+{
+	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
+}
+
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 		size_t size, int prot, u64 dma_mask)
 {
@@ -523,6 +550,55 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 	return iova + iova_off;
 }
 
+static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
+		size_t org_size, dma_addr_t dma_mask, bool coherent,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	int prot = dma_info_to_prot(dir, coherent, attrs);
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	size_t aligned_size = org_size;
+	void *padding_start;
+	size_t padding_size;
+	dma_addr_t iova;
+
+	/*
+	 * If both the physical buffer start address and size are
+	 * page aligned, we don't need to use a bounce page.
+	 */
+	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
+	    iova_offset(iovad, phys | org_size)) {
+		aligned_size = iova_align(iovad, org_size);
+		phys = swiotlb_tbl_map_single(dev,
+				__phys_to_dma(dev, io_tlb_start),
+				phys, org_size, aligned_size, dir, attrs);
+
+		if (phys == DMA_MAPPING_ERROR)
+			return DMA_MAPPING_ERROR;
+
+		/* Cleanup the padding area. */
+		padding_start = phys_to_virt(phys);
+		padding_size = aligned_size;
+
+		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+		    (dir == DMA_TO_DEVICE ||
+		     dir == DMA_BIDIRECTIONAL)) {
+			padding_start += org_size;
+			padding_size -= org_size;
+		}
+
+		memset(padding_start, 0, padding_size);
+	}
+
+	iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
+	if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
+		swiotlb_tbl_unmap_single(dev, phys, org_size,
+				aligned_size, dir, attrs);
+
+	return iova;
+}
+
 static void __iommu_dma_free_pages(struct page **pages, int count)
 {
 	while (count--)
@@ -698,11 +774,15 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 {
 	phys_addr_t phys;
 
-	if (dev_is_dma_coherent(dev))
+	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-	arch_sync_dma_for_cpu(phys, size, dir);
+	if (!dev_is_dma_coherent(dev))
+		arch_sync_dma_for_cpu(phys, size, dir);
+
+	if (is_swiotlb_buffer(phys))
+		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
 }
 
 static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -710,11 +790,15 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 {
 	phys_addr_t phys;
 
-	if (dev_is_dma_coherent(dev))
+	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-	arch_sync_dma_for_device(phys, size, dir);
+	if (is_swiotlb_buffer(phys))
+		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);
+
+	if (!dev_is_dma_coherent(dev))
+		arch_sync_dma_for_device(phys, size, dir);
 }
 
 static void iommu_dma_sync_sg_for_cpu(struct device *dev,
@@ -724,11 +808,17 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	if (dev_is_dma_coherent(dev))
+	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
 		return;
 
-	for_each_sg(sgl, sg, nelems, i)
-		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
+	for_each_sg(sgl, sg, nelems, i) {
+		if (!dev_is_dma_coherent(dev))
+			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
+
+		if (is_swiotlb_buffer(sg_phys(sg)))
+			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
+						dir, SYNC_FOR_CPU);
+	}
 }
 
 static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -738,11 +828,17 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	if (dev_is_dma_coherent(dev))
+	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
 		return;
 
-	for_each_sg(sgl, sg, nelems, i)
-		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
+	for_each_sg(sgl, sg, nelems, i) {
+		if (is_swiotlb_buffer(sg_phys(sg)))
+			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
+						dir, SYNC_FOR_DEVICE);
+
+		if (!dev_is_dma_coherent(dev))
+			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
+	}
 }
 
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -751,10 +847,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
 	bool coherent = dev_is_dma_coherent(dev);
-	int prot = dma_info_to_prot(dir, coherent, attrs);
 	dma_addr_t dma_handle;
 
-	dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
+	dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
+			coherent, dir, attrs);
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    dma_handle != DMA_MAPPING_ERROR)
 		arch_sync_dma_for_device(phys, size, dir);
@@ -766,7 +862,7 @@ static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 {
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
-	__iommu_dma_unmap(dev, dma_handle, size);
+	__iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
 }
 
 /*
@@ -844,6 +940,39 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
 	}
 }
 
+static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		__iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
+				sg_dma_len(s), dir, attrs);
+}
+
+static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
+				s->length, dma_get_mask(dev),
+				dev_is_dma_coherent(dev), dir, attrs);
+		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
+			goto out_unmap;
+		sg_dma_len(s) = s->length;
+	}
+
+	return nents;
+
+out_unmap:
+	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+	return 0;
+}
+
 /*
  * The DMA API client is passing in a scatterlist which could describe
  * any old buffer layout, but the IOMMU API requires everything to be
@@ -870,6 +999,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
 
+	if (dev_is_untrusted(dev))
+		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
+
 	/*
 	 * Work out how much IOVA space we need, and align the segments to
 	 * IOVA granules for the IOMMU driver to handle. With some clever
@@ -939,6 +1071,11 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
 
+	if (dev_is_untrusted(dev)) {
+		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
+		return;
+	}
+
 	/*
 	 * The scatterlist segments are mapped into a single
 	 * contiguous IOVA allocation, so this is incredibly easy.
-- 
2.17.1

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH v3 4/6] iommu: Add quirk for Intel graphic devices in map_sg
  2020-09-12  3:21 [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api Lu Baolu
                   ` (2 preceding siblings ...)
  2020-09-12  3:21 ` [PATCH v3 3/6] iommu: Allow the dma-iommu api to use bounce buffers Lu Baolu
@ 2020-09-12  3:21 ` Lu Baolu
  2020-09-12  3:21 ` [PATCH v3 5/6] iommu/vt-d: Convert intel iommu driver to the iommu ops Lu Baolu
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 20+ messages in thread
From: Lu Baolu @ 2020-09-12  3:21 UTC (permalink / raw)
  To: Joerg Roedel, Tom Murphy, David Woodhouse, Christoph Hellwig
  Cc: Tvrtko Ursulin, Ashok Raj, Intel-gfx, linux-kernel, iommu

Combining the sg segments exposes a bug in the Intel i915 driver which
causes visual artifacts and the screen to freeze. This is most likely
because of how the i915 driver handles the returned list. It probably
doesn't respect the returned value specifying the number of elements in
the list and instead depends on the previous behaviour of the Intel
iommu driver which would return the same number of elements in the
output list as in the input list.

Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/iommu/dma-iommu.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 1a1da22e5a5e..fc19f1fb9413 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -880,6 +880,33 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
 	int i, count = 0;
 
+	/*
+	 * The Intel graphic driver used to assume that the returned
+	 * sg list is not combined. This blocks the efforts of converting
+	 * the Intel IOMMU driver to the dma-iommu APIs. Add this quirk to
+	 * make the device driver work, and it should be removed once the
+	 * issue is fixed in the i915 driver.
+	 */
+	if (IS_ENABLED(CONFIG_DRM_I915) && dev_is_pci(dev) &&
+	    to_pci_dev(dev)->vendor == PCI_VENDOR_ID_INTEL &&
+	    (to_pci_dev(dev)->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
+		for_each_sg(sg, s, nents, i) {
+			unsigned int s_iova_off = sg_dma_address(s);
+			unsigned int s_length = sg_dma_len(s);
+			unsigned int s_iova_len = s->length;
+
+			s->offset += s_iova_off;
+			s->length = s_length;
+			sg_dma_address(s) = dma_addr + s_iova_off;
+			sg_dma_len(s) = s_length;
+			dma_addr += s_iova_len;
+
+			pr_info_once("sg combining disabled due to i915 driver\n");
+		}
+
+		return nents;
+	}
+
 	for_each_sg(sg, s, nents, i) {
 		/* Restore this segment's original unaligned fields first */
 		unsigned int s_iova_off = sg_dma_address(s);
-- 
2.17.1

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH v3 5/6] iommu/vt-d: Convert intel iommu driver to the iommu ops
  2020-09-12  3:21 [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api Lu Baolu
                   ` (3 preceding siblings ...)
  2020-09-12  3:21 ` [PATCH v3 4/6] iommu: Add quirk for Intel graphic devices in map_sg Lu Baolu
@ 2020-09-12  3:21 ` Lu Baolu
  2020-09-12  3:22 ` [PATCH v3 6/6] iommu/vt-d: Cleanup after converting to dma-iommu ops Lu Baolu
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 20+ messages in thread
From: Lu Baolu @ 2020-09-12  3:21 UTC (permalink / raw)
  To: Joerg Roedel, Tom Murphy, David Woodhouse, Christoph Hellwig
  Cc: Tvrtko Ursulin, Ashok Raj, Intel-gfx, linux-kernel, iommu

From: Tom Murphy <murphyt7@tcd.ie>

Convert the intel iommu driver to the dma-iommu api. Remove the iova
handling and reserve region code from the intel iommu driver.

Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/iommu/intel/Kconfig |   1 +
 drivers/iommu/intel/iommu.c | 742 ++----------------------------------
 2 files changed, 43 insertions(+), 700 deletions(-)

diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 5337ee1584b0..28a3d1596c76 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -13,6 +13,7 @@ config INTEL_IOMMU
 	select DMAR_TABLE
 	select SWIOTLB
 	select IOASID
+	select IOMMU_DMA
 	help
 	  DMA remapping (DMAR) devices support enables independent address
 	  translations for Direct Memory Access (DMA) from devices.
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 63ee30c689a7..adc231790e0a 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -31,6 +31,7 @@
 #include <linux/io.h>
 #include <linux/iova.h>
 #include <linux/iommu.h>
+#include <linux/dma-iommu.h>
 #include <linux/intel-iommu.h>
 #include <linux/syscore_ops.h>
 #include <linux/tboot.h>
@@ -41,7 +42,6 @@
 #include <linux/dma-direct.h>
 #include <linux/crash_dump.h>
 #include <linux/numa.h>
-#include <linux/swiotlb.h>
 #include <asm/irq_remapping.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
@@ -382,9 +382,6 @@ struct device_domain_info *get_domain_info(struct device *dev)
 DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
-#define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) &&	\
-				to_pci_dev(d)->untrusted)
-
 /*
  * Iterate over elements in device_domain_list and call the specified
  * callback @fn against each element.
@@ -1242,13 +1239,6 @@ static void dma_free_pagelist(struct page *freelist)
 	}
 }
 
-static void iova_entry_free(unsigned long data)
-{
-	struct page *freelist = (struct page *)data;
-
-	dma_free_pagelist(freelist);
-}
-
 /* iommu handling */
 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 {
@@ -1613,19 +1603,17 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
 		iommu_flush_write_buffer(iommu);
 }
 
-static void iommu_flush_iova(struct iova_domain *iovad)
+static void intel_flush_iotlb_all(struct iommu_domain *domain)
 {
-	struct dmar_domain *domain;
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	int idx;
 
-	domain = container_of(iovad, struct dmar_domain, iovad);
-
-	for_each_domain_iommu(idx, domain) {
+	for_each_domain_iommu(idx, dmar_domain) {
 		struct intel_iommu *iommu = g_iommus[idx];
-		u16 did = domain->iommu_did[iommu->seq_id];
+		u16 did = dmar_domain->iommu_did[iommu->seq_id];
 
-		if (domain_use_first_level(domain))
-			domain_flush_piotlb(iommu, domain, 0, -1, 0);
+		if (domain_use_first_level(dmar_domain))
+			domain_flush_piotlb(iommu, dmar_domain, 0, -1, 0);
 		else
 			iommu->flush.flush_iotlb(iommu, did, 0, 0,
 						 DMA_TLB_DSI_FLUSH);
@@ -1907,48 +1895,6 @@ static int domain_detach_iommu(struct dmar_domain *domain,
 	return count;
 }
 
-static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_rbtree_key;
-
-static int dmar_init_reserved_ranges(void)
-{
-	struct pci_dev *pdev = NULL;
-	struct iova *iova;
-	int i;
-
-	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
-
-	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
-		&reserved_rbtree_key);
-
-	/* IOAPIC ranges shouldn't be accessed by DMA */
-	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
-		IOVA_PFN(IOAPIC_RANGE_END));
-	if (!iova) {
-		pr_err("Reserve IOAPIC range failed\n");
-		return -ENODEV;
-	}
-
-	/* Reserve all PCI MMIO to avoid peer-to-peer access */
-	for_each_pci_dev(pdev) {
-		struct resource *r;
-
-		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-			r = &pdev->resource[i];
-			if (!r->flags || !(r->flags & IORESOURCE_MEM))
-				continue;
-			iova = reserve_iova(&reserved_iova_list,
-					    IOVA_PFN(r->start),
-					    IOVA_PFN(r->end));
-			if (!iova) {
-				pci_err(pdev, "Reserve iova for %pR failed\n", r);
-				return -ENODEV;
-			}
-		}
-	}
-	return 0;
-}
-
 static inline int guestwidth_to_adjustwidth(int gaw)
 {
 	int agaw;
@@ -1971,7 +1917,7 @@ static void domain_exit(struct dmar_domain *domain)
 
 	/* destroy iovas */
 	if (domain->domain.type == IOMMU_DOMAIN_DMA)
-		put_iova_domain(&domain->iovad);
+		iommu_put_dma_cookie(&domain->domain);
 
 	if (domain->pgd) {
 		struct page *freelist;
@@ -2502,16 +2448,6 @@ struct dmar_domain *find_domain(struct device *dev)
 	return NULL;
 }
 
-static void do_deferred_attach(struct device *dev)
-{
-	struct iommu_domain *domain;
-
-	dev_iommu_priv_set(dev, NULL);
-	domain = iommu_get_domain_for_dev(dev);
-	if (domain)
-		intel_iommu_attach_device(domain, dev);
-}
-
 static inline struct device_domain_info *
 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
 {
@@ -3384,591 +3320,6 @@ static int __init init_dmars(void)
 	return ret;
 }
 
-/* This takes a number of _MM_ pages, not VTD pages */
-static unsigned long intel_alloc_iova(struct device *dev,
-				     struct dmar_domain *domain,
-				     unsigned long nrpages, uint64_t dma_mask)
-{
-	unsigned long iova_pfn;
-
-	/*
-	 * Restrict dma_mask to the width that the iommu can handle.
-	 * First-level translation restricts the input-address to a
-	 * canonical address (i.e., address bits 63:N have the same
-	 * value as address bit [N-1], where N is 48-bits with 4-level
-	 * paging and 57-bits with 5-level paging). Hence, skip bit
-	 * [N-1].
-	 */
-	if (domain_use_first_level(domain))
-		dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw - 1),
-				 dma_mask);
-	else
-		dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw),
-				 dma_mask);
-
-	/* Ensure we reserve the whole size-aligned region */
-	nrpages = __roundup_pow_of_two(nrpages);
-
-	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
-		/*
-		 * First try to allocate an io virtual address in
-		 * DMA_BIT_MASK(32) and if that fails then try allocating
-		 * from higher range
-		 */
-		iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
-					   IOVA_PFN(DMA_BIT_MASK(32)), false);
-		if (iova_pfn)
-			return iova_pfn;
-	}
-	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
-				   IOVA_PFN(dma_mask), true);
-	if (unlikely(!iova_pfn)) {
-		dev_err_once(dev, "Allocating %ld-page iova failed\n",
-			     nrpages);
-		return 0;
-	}
-
-	return iova_pfn;
-}
-
-static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
-				     size_t size, int dir, u64 dma_mask)
-{
-	struct dmar_domain *domain;
-	phys_addr_t start_paddr;
-	unsigned long iova_pfn;
-	int prot = 0;
-	int ret;
-	struct intel_iommu *iommu;
-	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
-
-	BUG_ON(dir == DMA_NONE);
-
-	if (unlikely(attach_deferred(dev)))
-		do_deferred_attach(dev);
-
-	domain = find_domain(dev);
-	if (!domain)
-		return DMA_MAPPING_ERROR;
-
-	iommu = domain_get_iommu(domain);
-	size = aligned_nrpages(paddr, size);
-
-	iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
-	if (!iova_pfn)
-		goto error;
-
-	/*
-	 * Check if DMAR supports zero-length reads on write only
-	 * mappings..
-	 */
-	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
-			!cap_zlr(iommu->cap))
-		prot |= DMA_PTE_READ;
-	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
-		prot |= DMA_PTE_WRITE;
-	/*
-	 * paddr - (paddr + size) might be partial page, we should map the whole
-	 * page.  Note: if two part of one page are separately mapped, we
-	 * might have two guest_addr mapping to the same host paddr, but this
-	 * is not a big problem
-	 */
-	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
-				 mm_to_dma_pfn(paddr_pfn), size, prot);
-	if (ret)
-		goto error;
-
-	start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
-	start_paddr += paddr & ~PAGE_MASK;
-
-	trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT);
-
-	return start_paddr;
-
-error:
-	if (iova_pfn)
-		free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
-	dev_err(dev, "Device request: %zx@%llx dir %d --- failed\n",
-		size, (unsigned long long)paddr, dir);
-	return DMA_MAPPING_ERROR;
-}
-
-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction dir,
-				 unsigned long attrs)
-{
-	return __intel_map_single(dev, page_to_phys(page) + offset,
-				  size, dir, *dev->dma_mask);
-}
-
-static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
-				     size_t size, enum dma_data_direction dir,
-				     unsigned long attrs)
-{
-	return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
-}
-
-static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
-{
-	struct dmar_domain *domain;
-	unsigned long start_pfn, last_pfn;
-	unsigned long nrpages;
-	unsigned long iova_pfn;
-	struct intel_iommu *iommu;
-	struct page *freelist;
-	struct pci_dev *pdev = NULL;
-
-	domain = find_domain(dev);
-	BUG_ON(!domain);
-
-	iommu = domain_get_iommu(domain);
-
-	iova_pfn = IOVA_PFN(dev_addr);
-
-	nrpages = aligned_nrpages(dev_addr, size);
-	start_pfn = mm_to_dma_pfn(iova_pfn);
-	last_pfn = start_pfn + nrpages - 1;
-
-	if (dev_is_pci(dev))
-		pdev = to_pci_dev(dev);
-
-	freelist = domain_unmap(domain, start_pfn, last_pfn, NULL);
-	if (intel_iommu_strict || (pdev && pdev->untrusted) ||
-			!has_iova_flush_queue(&domain->iovad)) {
-		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
-				      nrpages, !freelist, 0);
-		/* free iova */
-		free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
-		dma_free_pagelist(freelist);
-	} else {
-		queue_iova(&domain->iovad, iova_pfn, nrpages,
-			   (unsigned long)freelist);
-		/*
-		 * queue up the release of the unmap to save the 1/6th of the
-		 * cpu used up by the iotlb flush operation...
-		 */
-	}
-
-	trace_unmap_single(dev, dev_addr, size);
-}
-
-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
-			     size_t size, enum dma_data_direction dir,
-			     unsigned long attrs)
-{
-	intel_unmap(dev, dev_addr, size);
-}
-
-static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	intel_unmap(dev, dev_addr, size);
-}
-
-static void *intel_alloc_coherent(struct device *dev, size_t size,
-				  dma_addr_t *dma_handle, gfp_t flags,
-				  unsigned long attrs)
-{
-	struct page *page = NULL;
-	int order;
-
-	if (unlikely(attach_deferred(dev)))
-		do_deferred_attach(dev);
-
-	size = PAGE_ALIGN(size);
-	order = get_order(size);
-
-	if (gfpflags_allow_blocking(flags)) {
-		unsigned int count = size >> PAGE_SHIFT;
-
-		page = dma_alloc_from_contiguous(dev, count, order,
-						 flags & __GFP_NOWARN);
-	}
-
-	if (!page)
-		page = alloc_pages(flags, order);
-	if (!page)
-		return NULL;
-	memset(page_address(page), 0, size);
-
-	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
-					 DMA_BIDIRECTIONAL,
-					 dev->coherent_dma_mask);
-	if (*dma_handle != DMA_MAPPING_ERROR)
-		return page_address(page);
-	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
-		__free_pages(page, order);
-
-	return NULL;
-}
-
-static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
-				dma_addr_t dma_handle, unsigned long attrs)
-{
-	int order;
-	struct page *page = virt_to_page(vaddr);
-
-	size = PAGE_ALIGN(size);
-	order = get_order(size);
-
-	intel_unmap(dev, dma_handle, size);
-	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
-		__free_pages(page, order);
-}
-
-static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
-			   int nelems, enum dma_data_direction dir,
-			   unsigned long attrs)
-{
-	dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
-	unsigned long nrpages = 0;
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sglist, sg, nelems, i) {
-		nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
-	}
-
-	intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
-
-	trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
-}
-
-static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
-			enum dma_data_direction dir, unsigned long attrs)
-{
-	int i;
-	struct dmar_domain *domain;
-	size_t size = 0;
-	int prot = 0;
-	unsigned long iova_pfn;
-	int ret;
-	struct scatterlist *sg;
-	unsigned long start_vpfn;
-	struct intel_iommu *iommu;
-
-	BUG_ON(dir == DMA_NONE);
-
-	if (unlikely(attach_deferred(dev)))
-		do_deferred_attach(dev);
-
-	domain = find_domain(dev);
-	if (!domain)
-		return 0;
-
-	iommu = domain_get_iommu(domain);
-
-	for_each_sg(sglist, sg, nelems, i)
-		size += aligned_nrpages(sg->offset, sg->length);
-
-	iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
-				*dev->dma_mask);
-	if (!iova_pfn) {
-		sglist->dma_length = 0;
-		return 0;
-	}
-
-	/*
-	 * Check if DMAR supports zero-length reads on write only
-	 * mappings..
-	 */
-	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
-			!cap_zlr(iommu->cap))
-		prot |= DMA_PTE_READ;
-	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
-		prot |= DMA_PTE_WRITE;
-
-	start_vpfn = mm_to_dma_pfn(iova_pfn);
-
-	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
-	if (unlikely(ret)) {
-		dma_pte_free_pagetable(domain, start_vpfn,
-				       start_vpfn + size - 1,
-				       agaw_to_level(domain->agaw) + 1);
-		free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
-		return 0;
-	}
-
-	for_each_sg(sglist, sg, nelems, i)
-		trace_map_sg(dev, i + 1, nelems, sg);
-
-	return nelems;
-}
-
-static u64 intel_get_required_mask(struct device *dev)
-{
-	return DMA_BIT_MASK(32);
-}
-
-static const struct dma_map_ops intel_dma_ops = {
-	.alloc = intel_alloc_coherent,
-	.free = intel_free_coherent,
-	.map_sg = intel_map_sg,
-	.unmap_sg = intel_unmap_sg,
-	.map_page = intel_map_page,
-	.unmap_page = intel_unmap_page,
-	.map_resource = intel_map_resource,
-	.unmap_resource = intel_unmap_resource,
-	.dma_supported = dma_direct_supported,
-	.mmap = dma_common_mmap,
-	.get_sgtable = dma_common_get_sgtable,
-	.get_required_mask = intel_get_required_mask,
-};
-
-static void
-bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
-		   enum dma_data_direction dir, enum dma_sync_target target)
-{
-	struct dmar_domain *domain;
-	phys_addr_t tlb_addr;
-
-	domain = find_domain(dev);
-	if (WARN_ON(!domain))
-		return;
-
-	tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr);
-	if (is_swiotlb_buffer(tlb_addr))
-		swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
-}
-
-static dma_addr_t
-bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
-		  enum dma_data_direction dir, unsigned long attrs,
-		  u64 dma_mask)
-{
-	size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
-	struct dmar_domain *domain;
-	struct intel_iommu *iommu;
-	unsigned long iova_pfn;
-	unsigned long nrpages;
-	phys_addr_t tlb_addr;
-	int prot = 0;
-	int ret;
-
-	if (unlikely(attach_deferred(dev)))
-		do_deferred_attach(dev);
-
-	domain = find_domain(dev);
-
-	if (WARN_ON(dir == DMA_NONE || !domain))
-		return DMA_MAPPING_ERROR;
-
-	iommu = domain_get_iommu(domain);
-	if (WARN_ON(!iommu))
-		return DMA_MAPPING_ERROR;
-
-	nrpages = aligned_nrpages(0, size);
-	iova_pfn = intel_alloc_iova(dev, domain,
-				    dma_to_mm_pfn(nrpages), dma_mask);
-	if (!iova_pfn)
-		return DMA_MAPPING_ERROR;
-
-	/*
-	 * Check if DMAR supports zero-length reads on write only
-	 * mappings..
-	 */
-	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
-			!cap_zlr(iommu->cap))
-		prot |= DMA_PTE_READ;
-	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
-		prot |= DMA_PTE_WRITE;
-
-	/*
-	 * If both the physical buffer start address and size are
-	 * page aligned, we don't need to use a bounce page.
-	 */
-	if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
-		tlb_addr = swiotlb_tbl_map_single(dev,
-				__phys_to_dma(dev, io_tlb_start),
-				paddr, size, aligned_size, dir, attrs);
-		if (tlb_addr == DMA_MAPPING_ERROR) {
-			goto swiotlb_error;
-		} else {
-			/* Cleanup the padding area. */
-			void *padding_start = phys_to_virt(tlb_addr);
-			size_t padding_size = aligned_size;
-
-			if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-			    (dir == DMA_TO_DEVICE ||
-			     dir == DMA_BIDIRECTIONAL)) {
-				padding_start += size;
-				padding_size -= size;
-			}
-
-			memset(padding_start, 0, padding_size);
-		}
-	} else {
-		tlb_addr = paddr;
-	}
-
-	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
-				 tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
-	if (ret)
-		goto mapping_error;
-
-	trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
-
-	return (phys_addr_t)iova_pfn << PAGE_SHIFT;
-
-mapping_error:
-	if (is_swiotlb_buffer(tlb_addr))
-		swiotlb_tbl_unmap_single(dev, tlb_addr, size,
-					 aligned_size, dir, attrs);
-swiotlb_error:
-	free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
-	dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
-		size, (unsigned long long)paddr, dir);
-
-	return DMA_MAPPING_ERROR;
-}
-
-static void
-bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
-		    enum dma_data_direction dir, unsigned long attrs)
-{
-	size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
-	struct dmar_domain *domain;
-	phys_addr_t tlb_addr;
-
-	domain = find_domain(dev);
-	if (WARN_ON(!domain))
-		return;
-
-	tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr);
-	if (WARN_ON(!tlb_addr))
-		return;
-
-	intel_unmap(dev, dev_addr, size);
-	if (is_swiotlb_buffer(tlb_addr))
-		swiotlb_tbl_unmap_single(dev, tlb_addr, size,
-					 aligned_size, dir, attrs);
-
-	trace_bounce_unmap_single(dev, dev_addr, size);
-}
-
-static dma_addr_t
-bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	return bounce_map_single(dev, page_to_phys(page) + offset,
-				 size, dir, attrs, *dev->dma_mask);
-}
-
-static dma_addr_t
-bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
-		    enum dma_data_direction dir, unsigned long attrs)
-{
-	return bounce_map_single(dev, phys_addr, size,
-				 dir, attrs, *dev->dma_mask);
-}
-
-static void
-bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
-		  enum dma_data_direction dir, unsigned long attrs)
-{
-	bounce_unmap_single(dev, dev_addr, size, dir, attrs);
-}
-
-static void
-bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
-		      enum dma_data_direction dir, unsigned long attrs)
-{
-	bounce_unmap_single(dev, dev_addr, size, dir, attrs);
-}
-
-static void
-bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sglist, sg, nelems, i)
-		bounce_unmap_page(dev, sg->dma_address,
-				  sg_dma_len(sg), dir, attrs);
-}
-
-static int
-bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
-	      enum dma_data_direction dir, unsigned long attrs)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i) {
-		sg->dma_address = bounce_map_page(dev, sg_page(sg),
-						  sg->offset, sg->length,
-						  dir, attrs);
-		if (sg->dma_address == DMA_MAPPING_ERROR)
-			goto out_unmap;
-		sg_dma_len(sg) = sg->length;
-	}
-
-	for_each_sg(sglist, sg, nelems, i)
-		trace_bounce_map_sg(dev, i + 1, nelems, sg);
-
-	return nelems;
-
-out_unmap:
-	bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
-	return 0;
-}
-
-static void
-bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-			   size_t size, enum dma_data_direction dir)
-{
-	bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
-}
-
-static void
-bounce_sync_single_for_device(struct device *dev, dma_addr_t addr,
-			      size_t size, enum dma_data_direction dir)
-{
-	bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
-}
-
-static void
-bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
-		       int nelems, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sglist, sg, nelems, i)
-		bounce_sync_single(dev, sg_dma_address(sg),
-				   sg_dma_len(sg), dir, SYNC_FOR_CPU);
-}
-
-static void
-bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
-			  int nelems, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sglist, sg, nelems, i)
-		bounce_sync_single(dev, sg_dma_address(sg),
-				   sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
-}
-
-static const struct dma_map_ops bounce_dma_ops = {
-	.alloc			= intel_alloc_coherent,
-	.free			= intel_free_coherent,
-	.map_sg			= bounce_map_sg,
-	.unmap_sg		= bounce_unmap_sg,
-	.map_page		= bounce_map_page,
-	.unmap_page		= bounce_unmap_page,
-	.sync_single_for_cpu	= bounce_sync_single_for_cpu,
-	.sync_single_for_device	= bounce_sync_single_for_device,
-	.sync_sg_for_cpu	= bounce_sync_sg_for_cpu,
-	.sync_sg_for_device	= bounce_sync_sg_for_device,
-	.map_resource		= bounce_map_resource,
-	.unmap_resource		= bounce_unmap_resource,
-	.dma_supported		= dma_direct_supported,
-};
-
 static inline int iommu_domain_cache_init(void)
 {
 	int ret = 0;
@@ -4636,7 +3987,7 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
 			if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA)
 				continue;
 
-			free_cpu_cached_iovas(cpu, &domain->iovad);
+			iommu_dma_free_cpu_cached_iovas(cpu, &domain->domain);
 		}
 	}
 }
@@ -4908,12 +4259,6 @@ int __init intel_iommu_init(void)
 	if (list_empty(&dmar_atsr_units))
 		pr_info("No ATSR found\n");
 
-	if (dmar_init_reserved_ranges()) {
-		if (force_on)
-			panic("tboot: Failed to reserve iommu ranges\n");
-		goto out_free_reserved_range;
-	}
-
 	if (dmar_map_gfx)
 		intel_iommu_gfx_mapped = 1;
 
@@ -4924,7 +4269,7 @@ int __init intel_iommu_init(void)
 		if (force_on)
 			panic("tboot: Failed to initialize DMARs\n");
 		pr_err("Initialization failed\n");
-		goto out_free_reserved_range;
+		goto out_free_dmar;
 	}
 	up_write(&dmar_global_lock);
 
@@ -4965,8 +4310,6 @@ int __init intel_iommu_init(void)
 
 	return 0;
 
-out_free_reserved_range:
-	put_iova_domain(&reserved_iova_list);
 out_free_dmar:
 	intel_iommu_free_dmars();
 	up_write(&dmar_global_lock);
@@ -5064,17 +4407,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 	return 0;
 }
 
-static void intel_init_iova_domain(struct dmar_domain *dmar_domain)
-{
-	init_iova_domain(&dmar_domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-	copy_reserved_iova(&reserved_iova_list, &dmar_domain->iovad);
-
-	if (!intel_iommu_strict &&
-	    init_iova_flush_queue(&dmar_domain->iovad,
-				  iommu_flush_iova, iova_entry_free))
-		pr_info("iova flush queue initialization failed\n");
-}
-
 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
 	struct dmar_domain *dmar_domain;
@@ -5094,8 +4426,9 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 			return NULL;
 		}
 
-		if (type == IOMMU_DOMAIN_DMA)
-			intel_init_iova_domain(dmar_domain);
+		if (type == IOMMU_DOMAIN_DMA &&
+		    iommu_get_dma_cookie(&dmar_domain->domain))
+			return NULL;
 
 		domain_update_iommu_cap(dmar_domain);
 
@@ -5727,13 +5060,13 @@ static void intel_iommu_release_device(struct device *dev)
 
 static void intel_iommu_probe_finalize(struct device *dev)
 {
-	struct iommu_domain *domain;
+	dma_addr_t base = IOVA_START_PFN << VTD_PAGE_SHIFT;
+	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 
-	domain = iommu_get_domain_for_dev(dev);
-	if (device_needs_bounce(dev))
-		set_dma_ops(dev, &bounce_dma_ops);
-	else if (domain && domain->type == IOMMU_DOMAIN_DMA)
-		set_dma_ops(dev, &intel_dma_ops);
+	if (domain && domain->type == IOMMU_DOMAIN_DMA)
+		iommu_setup_dma_ops(dev, base,
+				    __DOMAIN_MAX_ADDR(dmar_domain->gaw) - base);
 	else
 		set_dma_ops(dev, NULL);
 }
@@ -5846,19 +5179,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
 	return ret;
 }
 
-static void intel_iommu_apply_resv_region(struct device *dev,
-					  struct iommu_domain *domain,
-					  struct iommu_resv_region *region)
-{
-	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-	unsigned long start, end;
-
-	start = IOVA_PFN(region->start);
-	end   = IOVA_PFN(region->start + region->length - 1);
-
-	WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
-}
-
 static struct iommu_group *intel_iommu_device_group(struct device *dev)
 {
 	if (dev_is_pci(dev))
@@ -6047,6 +5367,27 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
 	return ret;
 }
 
+static int
+intel_iommu_domain_get_attr(struct iommu_domain *domain,
+			    enum iommu_attr attr, void *data)
+{
+	switch (domain->type) {
+	case IOMMU_DOMAIN_UNMANAGED:
+		return -ENODEV;
+	case IOMMU_DOMAIN_DMA:
+		switch (attr) {
+		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+			*(int *)data = !intel_iommu_strict;
+			return 0;
+		default:
+			return -ENODEV;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+}
+
 /*
  * Check that the device does not live on an external facing PCI port that is
  * marked as untrusted. Such devices should not be able to apply quirks and
@@ -6076,14 +5417,15 @@ const struct iommu_ops intel_iommu_ops = {
 	.aux_get_pasid		= intel_iommu_aux_get_pasid,
 	.map			= intel_iommu_map,
 	.unmap			= intel_iommu_unmap,
+	.flush_iotlb_all        = intel_flush_iotlb_all,
 	.iotlb_sync		= intel_iommu_tlb_sync,
 	.iova_to_phys		= intel_iommu_iova_to_phys,
 	.probe_device		= intel_iommu_probe_device,
 	.probe_finalize		= intel_iommu_probe_finalize,
 	.release_device		= intel_iommu_release_device,
+	.domain_get_attr        = intel_iommu_domain_get_attr,
 	.get_resv_regions	= intel_iommu_get_resv_regions,
 	.put_resv_regions	= generic_iommu_put_resv_regions,
-	.apply_resv_region	= intel_iommu_apply_resv_region,
 	.device_group		= intel_iommu_device_group,
 	.dev_has_feat		= intel_iommu_dev_has_feat,
 	.dev_feat_enabled	= intel_iommu_dev_feat_enabled,
-- 
2.17.1

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH v3 6/6] iommu/vt-d: Cleanup after converting to dma-iommu ops
  2020-09-12  3:21 [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api Lu Baolu
                   ` (4 preceding siblings ...)
  2020-09-12  3:21 ` [PATCH v3 5/6] iommu/vt-d: Convert intel iommu driver to the iommu ops Lu Baolu
@ 2020-09-12  3:22 ` Lu Baolu
  2020-09-14  8:04 ` [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api Tvrtko Ursulin
  2020-09-18 20:47 ` [Intel-gfx] " Logan Gunthorpe
  7 siblings, 0 replies; 20+ messages in thread
From: Lu Baolu @ 2020-09-12  3:22 UTC (permalink / raw)
  To: Joerg Roedel, Tom Murphy, David Woodhouse, Christoph Hellwig
  Cc: Tvrtko Ursulin, Ashok Raj, Intel-gfx, linux-kernel, iommu

Some cleanups after converting the driver to use dma-iommu ops.
- Remove nobounce option;
- Cleanup and simplify the path in domain mapping.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 .../admin-guide/kernel-parameters.txt         |  5 --
 drivers/iommu/intel/iommu.c                   | 90 ++++++-------------
 2 files changed, 28 insertions(+), 67 deletions(-)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a1068742a6df..0d11ef43d314 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1866,11 +1866,6 @@
 			Note that using this option lowers the security
 			provided by tboot because it makes the system
 			vulnerable to DMA attacks.
-		nobounce [Default off]
-			Disable bounce buffer for untrusted devices such as
-			the Thunderbolt devices. This will treat the untrusted
-			devices as the trusted ones, hence might expose security
-			risks of DMA attacks.
 
 	intel_idle.max_cstate=	[KNL,HW,ACPI,X86]
 			0	disables intel_idle and fall back on acpi_idle.
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index adc231790e0a..fe2544c95013 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -355,7 +355,6 @@ static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int iommu_identity_mapping;
-static int intel_no_bounce;
 static int iommu_skip_te_disable;
 
 #define IDENTMAP_GFX		2
@@ -457,9 +456,6 @@ static int __init intel_iommu_setup(char *str)
 		} else if (!strncmp(str, "tboot_noforce", 13)) {
 			pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
 			intel_iommu_tboot_noforce = 1;
-		} else if (!strncmp(str, "nobounce", 8)) {
-			pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
-			intel_no_bounce = 1;
 		}
 
 		str += strcspn(str, ",");
@@ -2230,15 +2226,14 @@ static inline int hardware_largepage_caps(struct dmar_domain *domain,
 	return level;
 }
 
-static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
-			    struct scatterlist *sg, unsigned long phys_pfn,
-			    unsigned long nr_pages, int prot)
+static int
+__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+		 unsigned long phys_pfn, unsigned long nr_pages, int prot)
 {
 	struct dma_pte *first_pte = NULL, *pte = NULL;
-	phys_addr_t pteval;
-	unsigned long sg_res = 0;
 	unsigned int largepage_lvl = 0;
 	unsigned long lvl_pages = 0;
+	phys_addr_t pteval;
 	u64 attr;
 
 	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
@@ -2250,26 +2245,14 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 	if (domain_use_first_level(domain))
 		attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
 
-	if (!sg) {
-		sg_res = nr_pages;
-		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
-	}
+	pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
 
 	while (nr_pages > 0) {
 		uint64_t tmp;
 
-		if (!sg_res) {
-			unsigned int pgoff = sg->offset & ~PAGE_MASK;
-
-			sg_res = aligned_nrpages(sg->offset, sg->length);
-			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
-			sg->dma_length = sg->length;
-			pteval = (sg_phys(sg) - pgoff) | attr;
-			phys_pfn = pteval >> VTD_PAGE_SHIFT;
-		}
-
 		if (!pte) {
-			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
+			largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
+					phys_pfn, nr_pages);
 
 			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
 			if (!pte)
@@ -2281,7 +2264,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 				pteval |= DMA_PTE_LARGE_PAGE;
 				lvl_pages = lvl_to_nr_pages(largepage_lvl);
 
-				nr_superpages = sg_res / lvl_pages;
+				nr_superpages = nr_pages / lvl_pages;
 				end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
 
 				/*
@@ -2315,48 +2298,45 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 		lvl_pages = lvl_to_nr_pages(largepage_lvl);
 
 		BUG_ON(nr_pages < lvl_pages);
-		BUG_ON(sg_res < lvl_pages);
 
 		nr_pages -= lvl_pages;
 		iov_pfn += lvl_pages;
 		phys_pfn += lvl_pages;
 		pteval += lvl_pages * VTD_PAGE_SIZE;
-		sg_res -= lvl_pages;
 
 		/* If the next PTE would be the first in a new page, then we
-		   need to flush the cache on the entries we've just written.
-		   And then we'll need to recalculate 'pte', so clear it and
-		   let it get set again in the if (!pte) block above.
-
-		   If we're done (!nr_pages) we need to flush the cache too.
-
-		   Also if we've been setting superpages, we may need to
-		   recalculate 'pte' and switch back to smaller pages for the
-		   end of the mapping, if the trailing size is not enough to
-		   use another superpage (i.e. sg_res < lvl_pages). */
+		 * need to flush the cache on the entries we've just written.
+		 * And then we'll need to recalculate 'pte', so clear it and
+		 * let it get set again in the if (!pte) block above.
+		 *
+		 * If we're done (!nr_pages) we need to flush the cache too.
+		 *
+		 * Also if we've been setting superpages, we may need to
+		 * recalculate 'pte' and switch back to smaller pages for the
+		 * end of the mapping, if the trailing size is not enough to
+		 * use another superpage (i.e. nr_pages < lvl_pages).
+		 */
 		pte++;
 		if (!nr_pages || first_pte_in_page(pte) ||
-		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
+		    (largepage_lvl > 1 && nr_pages < lvl_pages)) {
 			domain_flush_cache(domain, first_pte,
 					   (void *)pte - (void *)first_pte);
 			pte = NULL;
 		}
-
-		if (!sg_res && nr_pages)
-			sg = sg_next(sg);
 	}
+
 	return 0;
 }
 
-static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
-			  struct scatterlist *sg, unsigned long phys_pfn,
-			  unsigned long nr_pages, int prot)
+static int
+domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+	       unsigned long phys_pfn, unsigned long nr_pages, int prot)
 {
 	int iommu_id, ret;
 	struct intel_iommu *iommu;
 
 	/* Do the real mapping first */
-	ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+	ret = __domain_mapping(domain, iov_pfn, phys_pfn, nr_pages, prot);
 	if (ret)
 		return ret;
 
@@ -2368,20 +2348,6 @@ static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 	return 0;
 }
 
-static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
-				    struct scatterlist *sg, unsigned long nr_pages,
-				    int prot)
-{
-	return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
-}
-
-static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
-				     unsigned long phys_pfn, unsigned long nr_pages,
-				     int prot)
-{
-	return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
-}
-
 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
 	unsigned long flags;
@@ -2638,7 +2604,7 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
 	 */
 	dma_pte_clear_range(domain, first_vpfn, last_vpfn);
 
-	return __domain_mapping(domain, first_vpfn, NULL,
+	return __domain_mapping(domain, first_vpfn,
 				first_vpfn, last_vpfn - first_vpfn + 1,
 				DMA_PTE_READ|DMA_PTE_WRITE);
 }
@@ -4895,8 +4861,8 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	/* Round up size to next multiple of PAGE_SIZE, if it and
 	   the low bits of hpa would take us onto the next page */
 	size = aligned_nrpages(hpa, size);
-	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
-				 hpa >> VTD_PAGE_SHIFT, size, prot);
+	ret = domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
+			     hpa >> VTD_PAGE_SHIFT, size, prot);
 	return ret;
 }
 
-- 
2.17.1

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* Re: [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api
  2020-09-12  3:21 [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api Lu Baolu
                   ` (5 preceding siblings ...)
  2020-09-12  3:22 ` [PATCH v3 6/6] iommu/vt-d: Cleanup after converting to dma-iommu ops Lu Baolu
@ 2020-09-14  8:04 ` Tvrtko Ursulin
  2020-09-15  1:47   ` Lu Baolu
  2020-09-18 20:47 ` [Intel-gfx] " Logan Gunthorpe
  7 siblings, 1 reply; 20+ messages in thread
From: Tvrtko Ursulin @ 2020-09-14  8:04 UTC (permalink / raw)
  To: Lu Baolu, Joerg Roedel, Tom Murphy, David Woodhouse, Christoph Hellwig
  Cc: Intel-gfx, Ashok Raj, iommu, linux-kernel


Hi,

On 12/09/2020 04:21, Lu Baolu wrote:
> Tom Murphy has almost done all the work. His latest patch series was
> posted here.
> 
> https://lore.kernel.org/linux-iommu/20200903201839.7327-1-murphyt7@tcd.ie/
> 
> Thanks a lot!
> 
> This series is a follow-up with below changes:
> 
> 1. Add a quirk for the i915 driver issue described in Tom's cover
> letter.

Last week I copied you on an i915 series which appears to remove the need for this quirk. So if we get those i915 patches reviewed and merged, do you still want to pursue this quirk?

> 2. Fix several bugs in patch "iommu: Allow the dma-iommu api to use
> bounce buffers" to make the bounce buffer work for untrusted devices.
> 3. Several cleanups in iommu/vt-d driver after the conversion.

With the previous version of the series I hit a problem on Ivybridge where apparently the dma engine width is not respected. At least that is my layman interpretation of the errors. From the older thread:

<3> [209.526605] DMAR: intel_iommu_map: iommu width (39) is not sufficient for the mapped address (ffff008000)

Relevant iommu boot related messages are:

<6>[    0.184234] DMAR: Host address width 36
<6>[    0.184245] DMAR: DRHD base: 0x000000fed90000 flags: 0x0
<6>[    0.184288] DMAR: dmar0: reg_base_addr fed90000 ver 1:0 cap c0000020e60262 ecap f0101a
<6>[    0.184308] DMAR: DRHD base: 0x000000fed91000 flags: 0x1
<6>[    0.184337] DMAR: dmar1: reg_base_addr fed91000 ver 1:0 cap c9008020660262 ecap f0105a
<6>[    0.184357] DMAR: RMRR base: 0x000000d8d28000 end: 0x000000d8d46fff
<6>[    0.184377] DMAR: RMRR base: 0x000000db000000 end: 0x000000df1fffff
<6>[    0.184398] DMAR-IR: IOAPIC id 2 under DRHD base  0xfed91000 IOMMU 1
<6>[    0.184414] DMAR-IR: HPET id 0 under DRHD base 0xfed91000
<6>[    0.184428] DMAR-IR: Queued invalidation will be enabled to support x2apic and Intr-remapping.
<6>[    0.185173] DMAR-IR: Enabled IRQ remapping in x2apic mode

<6>[    0.878934] DMAR: No ATSR found
<6>[    0.878966] DMAR: dmar0: Using Queued invalidation
<6>[    0.879007] DMAR: dmar1: Using Queued invalidation

<6>[    0.915032] DMAR: Intel(R) Virtualization Technology for Directed I/O
<6>[    0.915060] PCI-DMA: Using software bounce buffering for IO (SWIOTLB)
<6>[    0.915084] software IO TLB: mapped [mem 0xc80d4000-0xcc0d4000] (64MB)

(Full boot log at https://intel-gfx-ci.01.org/tree/drm-tip/Trybot_7054/fi-ivb-3770/boot0.txt, failures at https://intel-gfx-ci.01.org/tree/drm-tip/Trybot_7054/fi-ivb-3770/igt@i915_selftest@live@blt.html.)

Does this look familiar or at least plausible to you? Is this something your new series has fixed?

Regards,

Tvrtko

> 
> Please review and test.
> 
> Best regards,
> baolu
> 
> Lu Baolu (2):
>    iommu: Add quirk for Intel graphic devices in map_sg
>    iommu/vt-d: Cleanup after converting to dma-iommu ops
> 
> Tom Murphy (4):
>    iommu: Handle freelists when using deferred flushing in iommu drivers
>    iommu: Add iommu_dma_free_cpu_cached_iovas()
>    iommu: Allow the dma-iommu api to use bounce buffers
>    iommu/vt-d: Convert intel iommu driver to the iommu ops
> 
>   .../admin-guide/kernel-parameters.txt         |   5 -
>   drivers/iommu/dma-iommu.c                     | 229 ++++-
>   drivers/iommu/intel/Kconfig                   |   1 +
>   drivers/iommu/intel/iommu.c                   | 885 +++---------------
>   include/linux/dma-iommu.h                     |   8 +
>   include/linux/iommu.h                         |   1 +
>   6 files changed, 323 insertions(+), 806 deletions(-)
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api
  2020-09-14  8:04 ` [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api Tvrtko Ursulin
@ 2020-09-15  1:47   ` Lu Baolu
  2020-09-15  8:31     ` Tvrtko Ursulin
  0 siblings, 1 reply; 20+ messages in thread
From: Lu Baolu @ 2020-09-15  1:47 UTC (permalink / raw)
  To: Tvrtko Ursulin, Joerg Roedel, Tom Murphy, David Woodhouse,
	Christoph Hellwig
  Cc: linux-kernel, Intel-gfx, Ashok Raj, iommu

Hi Tvrtko,

On 9/14/20 4:04 PM, Tvrtko Ursulin wrote:
> 
> Hi,
> 
> On 12/09/2020 04:21, Lu Baolu wrote:
>> Tom Murphy has almost done all the work. His latest patch series was
>> posted here.
>>
>> https://lore.kernel.org/linux-iommu/20200903201839.7327-1-murphyt7@tcd.ie/
>>
>> Thanks a lot!
>>
>> This series is a follow-up with below changes:
>>
>> 1. Add a quirk for the i915 driver issue described in Tom's cover
>> letter.
> 
> Last week I have copied you on an i915 series which appears to remove the need for this quirk. so if we get those i915 patches reviewed and merged, do you still want to pursue this quirk?

It's up to the graphic guys. I don't know the details in i915 driver.
I don't think my tests could cover all cases.

> 
>> 2. Fix several bugs in patch "iommu: Allow the dma-iommu api to use
>> bounce buffers" to make the bounce buffer work for untrusted devices.
>> 3. Several cleanups in iommu/vt-d driver after the conversion.
> 
> With the previous version of the series I hit a problem on Ivybridge where apparently the dma engine width is not respected. At least that is my layman interpretation of the errors. From the older thread:
> 
> <3> [209.526605] DMAR: intel_iommu_map: iommu width (39) is not sufficient for the mapped address (ffff008000)
> 
> Relevant iommu boot related messages are:
> 
> <6>[    0.184234] DMAR: Host address width 36
> <6>[    0.184245] DMAR: DRHD base: 0x000000fed90000 flags: 0x0
> <6>[    0.184288] DMAR: dmar0: reg_base_addr fed90000 ver 1:0 cap c0000020e60262 ecap f0101a
> <6>[    0.184308] DMAR: DRHD base: 0x000000fed91000 flags: 0x1
> <6>[    0.184337] DMAR: dmar1: reg_base_addr fed91000 ver 1:0 cap c9008020660262 ecap f0105a
> <6>[    0.184357] DMAR: RMRR base: 0x000000d8d28000 end: 0x000000d8d46fff
> <6>[    0.184377] DMAR: RMRR base: 0x000000db000000 end: 0x000000df1fffff
> <6>[    0.184398] DMAR-IR: IOAPIC id 2 under DRHD base  0xfed91000 IOMMU 1
> <6>[    0.184414] DMAR-IR: HPET id 0 under DRHD base 0xfed91000
> <6>[    0.184428] DMAR-IR: Queued invalidation will be enabled to support x2apic and Intr-remapping.
> <6>[    0.185173] DMAR-IR: Enabled IRQ remapping in x2apic mode
> 
> <6>[    0.878934] DMAR: No ATSR found
> <6>[    0.878966] DMAR: dmar0: Using Queued invalidation
> <6>[    0.879007] DMAR: dmar1: Using Queued invalidation
> 
> <6>[    0.915032] DMAR: Intel(R) Virtualization Technology for Directed I/O
> <6>[    0.915060] PCI-DMA: Using software bounce buffering for IO (SWIOTLB)
> <6>[    0.915084] software IO TLB: mapped [mem 0xc80d4000-0xcc0d4000] (64MB)
> 
> (Full boot log at https://intel-gfx-ci.01.org/tree/drm-tip/Trybot_7054/fi-ivb-3770/boot0.txt, failures at https://intel-gfx-ci.01.org/tree/drm-tip/Trybot_7054/fi-ivb-3770/igt@i915_selftest@live@blt.html.)
> 
> Does this look familiar or at least plausible to you? Is this something your new series has fixed?

This happens during attaching a domain to device. It has nothing to do
with this patch series. I will look into this issue, but not in this
email thread context.

Best regards,
baolu

> 
> Regards,
> 
> Tvrtko
> 
>>
>> Please review and test.
>>
>> Best regards,
>> baolu
>>
>> Lu Baolu (2):
>>     iommu: Add quirk for Intel graphic devices in map_sg
>>     iommu/vt-d: Cleanup after converting to dma-iommu ops
>>
>> Tom Murphy (4):
>>     iommu: Handle freelists when using deferred flushing in iommu drivers
>>     iommu: Add iommu_dma_free_cpu_cached_iovas()
>>     iommu: Allow the dma-iommu api to use bounce buffers
>>     iommu/vt-d: Convert intel iommu driver to the iommu ops
>>
>>    .../admin-guide/kernel-parameters.txt         |   5 -
>>    drivers/iommu/dma-iommu.c                     | 229 ++++-
>>    drivers/iommu/intel/Kconfig                   |   1 +
>>    drivers/iommu/intel/iommu.c                   | 885 +++---------------
>>    include/linux/dma-iommu.h                     |   8 +
>>    include/linux/iommu.h                         |   1 +
>>    6 files changed, 323 insertions(+), 806 deletions(-)
>>
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api
  2020-09-15  1:47   ` Lu Baolu
@ 2020-09-15  8:31     ` Tvrtko Ursulin
  2020-09-22 11:05       ` Robin Murphy
  2020-09-24  2:35       ` Lu Baolu
  0 siblings, 2 replies; 20+ messages in thread
From: Tvrtko Ursulin @ 2020-09-15  8:31 UTC (permalink / raw)
  To: Lu Baolu, Joerg Roedel, Tom Murphy, David Woodhouse, Christoph Hellwig
  Cc: Intel-gfx, Ashok Raj, iommu, linux-kernel


On 15/09/2020 02:47, Lu Baolu wrote:
> Hi Tvrtko,
> 
> On 9/14/20 4:04 PM, Tvrtko Ursulin wrote:
>>
>> Hi,
>>
>> On 12/09/2020 04:21, Lu Baolu wrote:
>>> Tom Murphy has almost done all the work. His latest patch series was
>>> posted here.
>>>
>>> https://lore.kernel.org/linux-iommu/20200903201839.7327-1-murphyt7@tcd.ie/ 
>>>
>>>
>>> Thanks a lot!
>>>
>>> This series is a follow-up with below changes:
>>>
>>> 1. Add a quirk for the i915 driver issue described in Tom's cover
>>> letter.
>>
>> Last week I have copied you on an i915 series which appears to remove 
>> the need for this quirk. so if we get those i915 patches reviewed and 
>> merged, do you still want to pursue this quirk?
> 
> It's up to the graphic guys. I don't know the details in i915 driver.
> I don't think my tests could cover all cases.

I am the graphic guy. :) I just need some reviews (internally) for my 
series and then we can merge it, at which point you don't need the quirk 
patch any more. I'll try to accelerate this.

With regards to testing, you could send your series with my patches on 
top to our trybot mailing list (intel-gfx-trybot@lists.freedesktop.org / 
https://patchwork.freedesktop.org/project/intel-gfx-trybot/series/?ordering=-last_updated) 
which would show you if it is still hitting the DMAR issues in i915.

>>
>>> 2. Fix several bugs in patch "iommu: Allow the dma-iommu api to use
>>> bounce buffers" to make the bounce buffer work for untrusted devices.
>>> 3. Several cleanups in iommu/vt-d driver after the conversion.
>>
>> With the previous version of the series I hit a problem on Ivybridge 
>> where apparently the dma engine width is not respected. At least that 
>> is my layman interpretation of the errors. From the older thread:
>>
>> <3> [209.526605] DMAR: intel_iommu_map: iommu width (39) is not 
>> sufficient for the mapped address (ffff008000)
>>
>> Relevant iommu boot related messages are:
>>
>> <6>[    0.184234] DMAR: Host address width 36
>> <6>[    0.184245] DMAR: DRHD base: 0x000000fed90000 flags: 0x0
>> <6>[    0.184288] DMAR: dmar0: reg_base_addr fed90000 ver 1:0 cap 
>> c0000020e60262 ecap f0101a
>> <6>[    0.184308] DMAR: DRHD base: 0x000000fed91000 flags: 0x1
>> <6>[    0.184337] DMAR: dmar1: reg_base_addr fed91000 ver 1:0 cap 
>> c9008020660262 ecap f0105a
>> <6>[    0.184357] DMAR: RMRR base: 0x000000d8d28000 end: 0x000000d8d46fff
>> <6>[    0.184377] DMAR: RMRR base: 0x000000db000000 end: 0x000000df1fffff
>> <6>[    0.184398] DMAR-IR: IOAPIC id 2 under DRHD base  0xfed91000 
>> IOMMU 1
>> <6>[    0.184414] DMAR-IR: HPET id 0 under DRHD base 0xfed91000
>> <6>[    0.184428] DMAR-IR: Queued invalidation will be enabled to 
>> support x2apic and Intr-remapping.
>> <6>[    0.185173] DMAR-IR: Enabled IRQ remapping in x2apic mode
>>
>> <6>[    0.878934] DMAR: No ATSR found
>> <6>[    0.878966] DMAR: dmar0: Using Queued invalidation
>> <6>[    0.879007] DMAR: dmar1: Using Queued invalidation
>>
>> <6>[    0.915032] DMAR: Intel(R) Virtualization Technology for 
>> Directed I/O
>> <6>[    0.915060] PCI-DMA: Using software bounce buffering for IO 
>> (SWIOTLB)
>> <6>[    0.915084] software IO TLB: mapped [mem 0xc80d4000-0xcc0d4000] 
>> (64MB)
>>
>> (Full boot log at 
>> https://intel-gfx-ci.01.org/tree/drm-tip/Trybot_7054/fi-ivb-3770/boot0.txt, 
>> failures at 
>> https://intel-gfx-ci.01.org/tree/drm-tip/Trybot_7054/fi-ivb-3770/igt@i915_selftest@live@blt.html.) 
>>
>>
>> Does this look familiar or at least plausible to you? Is this 
>> something your new series has fixed?
> 
> This happens during attaching a domain to device. It has nothing to do
> with this patch series. I will look into this issue, but not in this
> email thread context.

I am not sure which step attaches the domain to the device, but these types 
of messages:

<3> [209.526605] DMAR: intel_iommu_map: iommu width (39) is not
 >> sufficient for the mapped address (ffff008000)

They definitely appear to happen at runtime, as i915 is getting 
exercised by userspace.

Regards,

Tvrtko
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api
  2020-09-12  3:21 [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api Lu Baolu
                   ` (6 preceding siblings ...)
  2020-09-14  8:04 ` [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api Tvrtko Ursulin
@ 2020-09-18 20:47 ` Logan Gunthorpe
  2020-09-20  6:36   ` Lu Baolu
  2020-09-22  9:51   ` Robin Murphy
  7 siblings, 2 replies; 20+ messages in thread
From: Logan Gunthorpe @ 2020-09-18 20:47 UTC (permalink / raw)
  To: Lu Baolu, Joerg Roedel, Tom Murphy, David Woodhouse, Christoph Hellwig
  Cc: Intel-gfx, Ashok Raj, iommu, linux-kernel

Hi Lu,

On 2020-09-11 9:21 p.m., Lu Baolu wrote:
> Tom Murphy has almost done all the work. His latest patch series was
> posted here.
>
> https://lore.kernel.org/linux-iommu/20200903201839.7327-1-murphyt7@tcd.ie/
>
> Thanks a lot!
>
> This series is a follow-up with below changes:
>
> 1. Add a quirk for the i915 driver issue described in Tom's cover
> letter.
> 2. Fix several bugs in patch "iommu: Allow the dma-iommu api to use
> bounce buffers" to make the bounce buffer work for untrusted devices.
> 3. Several cleanups in iommu/vt-d driver after the conversion.
>

I'm trying to test this on an old Sandy Bridge, but found that I get
spammed with warnings on boot. I've put a sample of a few of them below.
They all seem to be related to ioat.

I had the same issue with Tom's v2 but never saw this on his v1.

Thanks,

Logan


[   44.057877] ------------[ cut here ]------------
[   44.063167] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
[   44.073351] Modules linked in:
[   44.076882] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
[   44.087935] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
[   44.096650] Workqueue: events work_for_cpu_fn
[   44.101644] RIP: 0010:__iommu_dma_unmap+0x115/0x130
[   44.107225] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
[   44.128443] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
[   44.134413] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
[   44.144487] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
[   44.152613] RBP: 00000000fec00000 R08: 00000000000fec00 R09: 00000000000fedff
[   44.160733] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c39000
[   44.168861] R13: ffff888472d85ee8 R14: 0000000000080000 R15: 0000000000000000
[   44.176980] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
[   44.186198] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   44.192761] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
[   44.200871] Call Trace:
[   44.203716]  ? lockdep_hardirqs_on_prepare+0xad/0x180
[   44.209509]  iommu_dma_free+0x18/0x30
[   44.213734]  ioat_free_chan_resources+0x19e/0x300
[   44.219133]  ioat_dma_self_test+0x2a0/0x3d0
[   44.223964]  ioat_pci_probe+0x60e/0x903
[   44.228387]  local_pci_probe+0x42/0x80
[   44.232721]  work_for_cpu_fn+0x16/0x20
[   44.237037]  process_one_work+0x292/0x630
[   44.241644]  ? __schedule+0x344/0x970
[   44.245868]  worker_thread+0x227/0x3e0
[   44.250185]  ? process_one_work+0x630/0x630
[   44.254989]  kthread+0x136/0x150
[   44.258718]  ? kthread_use_mm+0x100/0x100
[   44.263334]  ret_from_fork+0x22/0x30
[   44.267474] irq event stamp: 1881262
[   44.271602] hardirqs last  enabled at (1881272): [<ffffffff811b8465>] console_unlock+0x435/0x570
[   44.281593] hardirqs last disabled at (1881281): [<ffffffff811b845b>] console_unlock+0x42b/0x570
[   44.291588] softirqs last  enabled at (1747140): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
[   44.301285] softirqs last disabled at (1747144): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
[   44.312153] ---[ end trace ed0f88fd959a5a39 ]---
[   44.353963] ------------[ cut here ]------------
[   44.359291] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
[   44.369482] Modules linked in:
[   44.373030] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
[   44.384097] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
[   44.392825] Workqueue: events work_for_cpu_fn
[   44.397831] RIP: 0010:__iommu_dma_unmap+0x115/0x130
[   44.403421] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
[   44.424644] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
[   44.430627] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
[   44.438770] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
[   44.446885] RBP: 00000000ffc00000 R08: 00000000000ffc00 R09: 00000000000ffdff
[   44.455000] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
[   44.463119] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
[   44.471235] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
[   44.480440] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   44.487004] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
[   44.495128] Call Trace:
[   44.498008]  iommu_dma_free+0x18/0x30
[   44.502230]  ioat_free_chan_resources+0x19e/0x300
[   44.507631]  ioat_dma_self_test+0x2a0/0x3d0
[   44.512466]  ioat_pci_probe+0x60e/0x903
[   44.516889]  local_pci_probe+0x42/0x80
[   44.521217]  work_for_cpu_fn+0x16/0x20
[   44.525540]  process_one_work+0x292/0x630
[   44.530157]  ? __schedule+0x344/0x970
[   44.534393]  worker_thread+0x227/0x3e0
[   44.538720]  ? process_one_work+0x630/0x630
[   44.543531]  kthread+0x136/0x150
[   44.547270]  ? kthread_use_mm+0x100/0x100
[   44.551889]  ret_from_fork+0x22/0x30
[   44.556034] irq event stamp: 2145654
[   44.560161] hardirqs last  enabled at (2145664): [<ffffffff811b8465>] console_unlock+0x435/0x570
[   44.570158] hardirqs last disabled at (2145673): [<ffffffff811b845b>] console_unlock+0x42b/0x570
[   44.580153] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
[   44.589854] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
[   44.600729] ---[ end trace ed0f88fd959a5a3a ]---
[   44.606043] ------------[ cut here ]------------
[   44.611331] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
[   44.621524] Modules linked in:
[   44.625050] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
[   44.636119] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
[   44.644834] Workqueue: events work_for_cpu_fn
[   44.649827] RIP: 0010:__iommu_dma_unmap+0x115/0x130
[   44.655410] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
[   44.676618] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
[   44.682588] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
[   44.690702] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
[   44.698816] RBP: 00000000ffa00000 R08: 00000000000ffa00 R09: 00000000000ffbff
[   44.706934] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
[   44.715054] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
[   44.723174] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
[   44.732384] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   44.738941] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
[   44.747060] Call Trace:
[   44.749921]  ? lockdep_hardirqs_on_prepare+0xad/0x180
[   44.755706]  iommu_dma_free+0x18/0x30
[   44.759931]  ioat_free_chan_resources+0x19e/0x300
[   44.765332]  ioat_dma_self_test+0x2a0/0x3d0
[   44.770156]  ioat_pci_probe+0x60e/0x903
[   44.774583]  local_pci_probe+0x42/0x80
[   44.778909]  work_for_cpu_fn+0x16/0x20
[   44.783232]  process_one_work+0x292/0x630
[   44.787846]  ? __schedule+0x344/0x970
[   44.792079]  worker_thread+0x227/0x3e0
[   44.796403]  ? process_one_work+0x630/0x630
[   44.801205]  kthread+0x136/0x150
[   44.804946]  ? kthread_use_mm+0x100/0x100
[   44.809563]  ret_from_fork+0x22/0x30
[   44.813704] irq event stamp: 2146044
[   44.817831] hardirqs last  enabled at (2146054): [<ffffffff811b8465>] console_unlock+0x435/0x570
[   44.827821] hardirqs last disabled at (2146063): [<ffffffff811b845b>] console_unlock+0x42b/0x570
[   44.837810] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
[   44.847503] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
[   44.858366] ---[ end trace ed0f88fd959a5a3b ]---
[   44.863675] ------------[ cut here ]------------
[   44.868969] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
[   44.879148] Modules linked in:
[   44.882689] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
[   44.893760] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
[   44.902487] Workqueue: events work_for_cpu_fn
[   44.907493] RIP: 0010:__iommu_dma_unmap+0x115/0x130
[   44.913080] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
[   44.934290] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
[   44.940254] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
[   44.948379] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
[   44.956505] RBP: 00000000ff800000 R08: 00000000000ff800 R09: 00000000000ff9ff
[   44.964627] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
[   44.972748] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
[   44.980871] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
[   44.990084] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   44.996644] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
[   45.004759] Call Trace:
[   45.007600]  ? lockdep_hardirqs_on_prepare+0xad/0x180
[   45.013391]  iommu_dma_free+0x18/0x30
[   45.017609]  ioat_free_chan_resources+0x19e/0x300
[   45.022992]  ioat_dma_self_test+0x2a0/0x3d0
[   45.027809]  ioat_pci_probe+0x60e/0x903
[   45.032237]  local_pci_probe+0x42/0x80
[   45.036563]  work_for_cpu_fn+0x16/0x20
[   45.040886]  process_one_work+0x292/0x630
[   45.045500]  ? __schedule+0x344/0x970
[   45.049735]  worker_thread+0x227/0x3e0
[   45.054061]  ? process_one_work+0x630/0x630
[   45.058864]  kthread+0x136/0x150
[   45.062597]  ? kthread_use_mm+0x100/0x100
[   45.067200]  ret_from_fork+0x22/0x30
[   45.071335] irq event stamp: 2146432
[   45.075458] hardirqs last  enabled at (2146442): [<ffffffff811b8465>] console_unlock+0x435/0x570
[   45.085440] hardirqs last disabled at (2146451): [<ffffffff811b845b>] console_unlock+0x42b/0x570
[   45.095432] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
[   45.105134] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
[   45.116004] ---[ end trace ed0f88fd959a5a3c ]---
[   45.121305] ------------[ cut here ]------------
[   45.126596] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
[   45.136772] Modules linked in:
[   45.142288] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
[   45.153351] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
[   45.162077] Workqueue: events work_for_cpu_fn
[   45.167074] RIP: 0010:__iommu_dma_unmap+0x115/0x130
[   45.172663] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
[   45.193874] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
[   45.199850] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
[   45.207970] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
[   45.216095] RBP: 00000000ff600000 R08: 00000000000ff600 R09: 00000000000ff7ff
[   45.224207] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
[   45.232327] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
[   45.240446] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
[   45.249662] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   45.256225] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
[   45.264344] Call Trace:
[   45.267205]  ? lockdep_hardirqs_on_prepare+0xad/0x180
[   45.273003]  iommu_dma_free+0x18/0x30
[   45.277231]  ioat_free_chan_resources+0x19e/0x300
[   45.282631]  ioat_dma_self_test+0x2a0/0x3d0
[   45.287457]  ioat_pci_probe+0x60e/0x903
[   45.291894]  local_pci_probe+0x42/0x80
[   45.296217]  work_for_cpu_fn+0x16/0x20
[   45.300539]  process_one_work+0x292/0x630
[   45.305162]  ? __schedule+0x344/0x970
[   45.309399]  worker_thread+0x227/0x3e0
[   45.313724]  ? process_one_work+0x630/0x630
[   45.318535]  kthread+0x136/0x150
[   45.322274]  ? kthread_use_mm+0x100/0x100
[   45.326893]  ret_from_fork+0x22/0x30
[   45.331034] irq event stamp: 2146822
[   45.335161] hardirqs last  enabled at (2146832): [<ffffffff811b8465>] console_unlock+0x435/0x570
[   45.345154] hardirqs last disabled at (2146841): [<ffffffff811b845b>] console_unlock+0x42b/0x570
[   45.355150] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
[   45.364850] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
[   45.375710] ---[ end trace ed0f88fd959a5a3d ]---
[   45.381018] ------------[ cut here ]------------
[   45.386308] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
[   45.396494] Modules linked in:
[   45.400020] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
[   45.411086] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
[   45.419810] Workqueue: events work_for_cpu_fn
[   45.424819] RIP: 0010:__iommu_dma_unmap+0x115/0x130
[   45.430406] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
[   45.451630] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
[   45.457610] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
[   45.465730] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
[   45.473853] RBP: 00000000ff400000 R08: 00000000000ff400 R09: 00000000000ff5ff
[   45.481973] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
[   45.490092] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
[   45.498211] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
[   45.507418] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   45.513969] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
[   45.522081] Call Trace:
[   45.524934]  ? lockdep_hardirqs_on_prepare+0xad/0x180
[   45.530718]  iommu_dma_free+0x18/0x30
[   45.534989]  ioat_free_chan_resources+0x19e/0x300
[   45.540378]  ioat_dma_self_test+0x2a0/0x3d0
[   45.545193]  ioat_pci_probe+0x60e/0x903
[   45.549619]  local_pci_probe+0x42/0x80
[   45.553940]  work_for_cpu_fn+0x16/0x20
[   45.558258]  process_one_work+0x292/0x630
[   45.562871]  ? __schedule+0x344/0x970
[   45.567108]  worker_thread+0x227/0x3e0
[   45.571434]  ? process_one_work+0x630/0x630
[   45.576243]  kthread+0x136/0x150
[   45.579981]  ? kthread_use_mm+0x100/0x100
[   45.584598]  ret_from_fork+0x22/0x30
[   45.588733] irq event stamp: 2147210
[   45.592860] hardirqs last  enabled at (2147220): [<ffffffff811b8465>] console_unlock+0x435/0x570
[   45.602845] hardirqs last disabled at (2147229): [<ffffffff811b845b>] console_unlock+0x42b/0x570
[   45.612836] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
[   45.622531] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
[   45.633392] ---[ end trace ed0f88fd959a5a3e ]---
[   45.638708] ------------[ cut here ]------------
[   45.644003] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
[   45.654191] Modules linked in:
[   45.657734] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
[   45.668807] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
[   45.677534] Workqueue: events work_for_cpu_fn
[   45.682538] RIP: 0010:__iommu_dma_unmap+0x115/0x130
[   45.688126] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
[   45.709343] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
[   45.715321] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
[   45.723440] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
[   45.731555] RBP: 00000000ff200000 R08: 00000000000ff200 R09: 00000000000ff3ff
[   45.739665] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
[   45.747777] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
[   45.755897] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
[   45.765106] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   45.771661] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
[   45.779778] Call Trace:
[   45.782638]  ? lockdep_hardirqs_on_prepare+0xad/0x180
[   45.788439]  iommu_dma_free+0x18/0x30
[   45.792664]  ioat_free_chan_resources+0x19e/0x300
[   45.798060]  ioat_dma_self_test+0x2a0/0x3d0
[   45.802886]  ioat_pci_probe+0x60e/0x903
[   45.807306]  local_pci_probe+0x42/0x80
[   45.811633]  work_for_cpu_fn+0x16/0x20
[   45.815946]  process_one_work+0x292/0x630
[   45.820556]  ? __schedule+0x344/0x970
[   45.824791]  worker_thread+0x227/0x3e0
[   45.829117]  ? process_one_work+0x630/0x630
[   45.833969]  kthread+0x136/0x150
[   45.837706]  ? kthread_use_mm+0x100/0x100
[   45.842325]  ret_from_fork+0x22/0x30
[   45.846475] irq event stamp: 2147602
[   45.850608] hardirqs last  enabled at (2147612): [<ffffffff811b8465>] console_unlock+0x435/0x570
[   45.860603] hardirqs last disabled at (2147621): [<ffffffff811b845b>] console_unlock+0x42b/0x570
[   45.870589] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
[   45.880293] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
[   45.891163] ---[ end trace ed0f88fd959a5a3f ]---
[   45.896473] ------------[ cut here ]------------
[   45.901762] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
[   45.911956] Modules linked in:
[   45.915503] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
[   45.926570] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
[   45.935302] Workqueue: events work_for_cpu_fn
[   45.940312] RIP: 0010:__iommu_dma_unmap+0x115/0x130
[   45.945905] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
[   45.967128] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
[   45.973111] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
[   45.981236] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
[   45.989363] RBP: 00000000ff000000 R08: 00000000000ff000 R09: 00000000000ff1ff
[   45.997489] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
[   46.005613] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
[   46.013738] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
[   46.022952] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   46.029521] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
[   46.037643] Call Trace:
[   46.040508]  ? lockdep_hardirqs_on_prepare+0xad/0x180
[   46.046305]  iommu_dma_free+0x18/0x30
[   46.050538]  ioat_free_chan_resources+0x19e/0x300
[   46.055941]  ioat_dma_self_test+0x2a0/0x3d0
[   46.060765]  ioat_pci_probe+0x60e/0x903
[   46.065196]  local_pci_probe+0x42/0x80
[   46.069526]  work_for_cpu_fn+0x16/0x20
[   46.073854]  process_one_work+0x292/0x630
[   46.078477]  ? __schedule+0x344/0x970
[   46.082717]  worker_thread+0x227/0x3e0
[   46.087035]  ? process_one_work+0x630/0x630
[   46.091849]  kthread+0x136/0x150
[   46.095594]  ? kthread_use_mm+0x100/0x100
[   46.100218]  ret_from_fork+0x22/0x30
[   46.104366] irq event stamp: 2147992
[   46.108499] hardirqs last  enabled at (2148002): [<ffffffff811b8465>] console_unlock+0x435/0x570
[   46.120483] hardirqs last disabled at (2148011): [<ffffffff811b845b>] console_unlock+0x42b/0x570
[   46.130472] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
[   46.140171] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
[   46.151034] ---[ end trace ed0f88fd959a5a40 ]---
[   46.156339] ------------[ cut here ]------------
[   46.161628] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
[   46.171817] Modules linked in:
[   46.175365] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
[   46.186430] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
[   46.195163] Workqueue: events work_for_cpu_fn
[   46.200173] RIP: 0010:__iommu_dma_unmap+0x115/0x130
[   46.205767] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
[   46.226991] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
[   46.232976] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
[   46.241101] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
[   46.249226] RBP: 00000000fec00000 R08: 00000000000fec00 R09: 00000000000fedff
[   46.257352] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
[   46.265474] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
[   46.273602] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
[   46.282822] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   46.289388] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
[   46.297513] Call Trace:
[   46.300358]  ? lockdep_hardirqs_on_prepare+0xad/0x180
[   46.306152]  iommu_dma_free+0x18/0x30
[   46.310379]  ioat_free_chan_resources+0x19e/0x300
[   46.315776]  ioat_dma_self_test+0x2a0/0x3d0
[   46.320607]  ioat_pci_probe+0x60e/0x903
[   46.325044]  local_pci_probe+0x42/0x80
[   46.329373]  work_for_cpu_fn+0x16/0x20
[   46.333699]  process_one_work+0x292/0x630
[   46.338321]  ? __schedule+0x344/0x970
[   46.342563]  worker_thread+0x227/0x3e0
[   46.346891]  ? process_one_work+0x630/0x630
[   46.351704]  kthread+0x136/0x150
[   46.355449]  ? kthread_use_mm+0x100/0x100
[   46.360069]  ret_from_fork+0x22/0x30
[   46.364204] irq event stamp: 2148380
[   46.368338] hardirqs last  enabled at (2148390): [<ffffffff811b8465>] console_unlock+0x435/0x570
[   46.378336] hardirqs last disabled at (2148399): [<ffffffff811b845b>] console_unlock+0x42b/0x570
[   46.388335] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
[   46.398040] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
[   46.408914] ---[ end trace ed0f88fd959a5a41 ]---
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api
  2020-09-18 20:47 ` [Intel-gfx] " Logan Gunthorpe
@ 2020-09-20  6:36   ` Lu Baolu
  2020-09-21 15:48     ` Logan Gunthorpe
  2020-09-22  9:51   ` Robin Murphy
  1 sibling, 1 reply; 20+ messages in thread
From: Lu Baolu @ 2020-09-20  6:36 UTC (permalink / raw)
  To: Logan Gunthorpe, Joerg Roedel, Tom Murphy, David Woodhouse,
	Christoph Hellwig
  Cc: linux-kernel, Intel-gfx, Ashok Raj, iommu

Hi Logan,

On 2020/9/19 4:47, Logan Gunthorpe wrote:
> Hi Lu,
> 
> On 2020-09-11 9:21 p.m., Lu Baolu wrote:
>> Tom Murphy has almost done all the work. His latest patch series was
>> posted here.
>>
>> https://lore.kernel.org/linux-iommu/20200903201839.7327-1-murphyt7@tcd.ie/
>>
>> Thanks a lot!
>>
>> This series is a follow-up with below changes:
>>
>> 1. Add a quirk for the i915 driver issue described in Tom's cover
>> letter.
>> 2. Fix several bugs in patch "iommu: Allow the dma-iommu api to use
>> bounce buffers" to make the bounce buffer work for untrusted devices.
>> 3. Several cleanups in iommu/vt-d driver after the conversion.
>>
> 
> I'm trying to test this on an old Sandy Bridge, but found that I get
> spammed with warnings on boot. I've put a sample of a few of them below.
> They all seem to be related to ioat.
> 
> I had the same issue with Tom's v2 but never saw this on his v1.

Have you verified whether this could be reproduced with the latest
upstream kernel (without this patch series)?

Best regards,
baolu
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api
  2020-09-20  6:36   ` Lu Baolu
@ 2020-09-21 15:48     ` Logan Gunthorpe
  2020-09-22  0:24       ` Lu Baolu
  0 siblings, 1 reply; 20+ messages in thread
From: Logan Gunthorpe @ 2020-09-21 15:48 UTC (permalink / raw)
  To: Lu Baolu, Joerg Roedel, Tom Murphy, David Woodhouse, Christoph Hellwig
  Cc: Intel-gfx, Ashok Raj, iommu, linux-kernel



On 2020-09-20 12:36 a.m., Lu Baolu wrote:
> Hi Logan,
> 
> On 2020/9/19 4:47, Logan Gunthorpe wrote:
>> Hi Lu,
>>
>> On 2020-09-11 9:21 p.m., Lu Baolu wrote:
>>> Tom Murphy has almost done all the work. His latest patch series was
>>> posted here.
>>>
>>> https://lore.kernel.org/linux-iommu/20200903201839.7327-1-murphyt7@tcd.ie/
>>>
>>> Thanks a lot!
>>>
>>> This series is a follow-up with below changes:
>>>
>>> 1. Add a quirk for the i915 driver issue described in Tom's cover
>>> letter.
>>> 2. Fix several bugs in patch "iommu: Allow the dma-iommu api to use
>>> bounce buffers" to make the bounce buffer work for untrusted devices.
>>> 3. Several cleanups in iommu/vt-d driver after the conversion.
>>>
>>
>> I'm trying to test this on an old Sandy Bridge, but found that I get
>> spammed with warnings on boot. I've put a sample of a few of them below.
>> They all seem to be related to ioat.
>>
>> I had the same issue with Tom's v2 but never saw this on his v1.
> 
> Have you verified whether this could be reproduced with the latest
> upstream kernel (without this patch series)?

Yes. Also, it's hitting a warning in the dma-iommu code which would not
be hit without this patch set.

Thanks,

Logan
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api
  2020-09-21 15:48     ` Logan Gunthorpe
@ 2020-09-22  0:24       ` Lu Baolu
  2020-09-22 15:38         ` Logan Gunthorpe
  0 siblings, 1 reply; 20+ messages in thread
From: Lu Baolu @ 2020-09-22  0:24 UTC (permalink / raw)
  To: Logan Gunthorpe, Joerg Roedel, Tom Murphy, David Woodhouse,
	Christoph Hellwig
  Cc: linux-kernel, Intel-gfx, Ashok Raj, iommu

Hi Logan,

On 9/21/20 11:48 PM, Logan Gunthorpe wrote:
> 
> 
> On 2020-09-20 12:36 a.m., Lu Baolu wrote:
>> Hi Logan,
>>
>> On 2020/9/19 4:47, Logan Gunthorpe wrote:
>>> Hi Lu,
>>>
>>> On 2020-09-11 9:21 p.m., Lu Baolu wrote:
>>>> Tom Murphy has almost done all the work. His latest patch series was
>>>> posted here.
>>>>
>>>> https://lore.kernel.org/linux-iommu/20200903201839.7327-1-murphyt7@tcd.ie/
>>>>
>>>> Thanks a lot!
>>>>
>>>> This series is a follow-up with below changes:
>>>>
>>>> 1. Add a quirk for the i915 driver issue described in Tom's cover
>>>> letter.
>>>> 2. Fix several bugs in patch "iommu: Allow the dma-iommu api to use
>>>> bounce buffers" to make the bounce buffer work for untrusted devices.
>>>> 3. Several cleanups in iommu/vt-d driver after the conversion.
>>>>
>>>
>>> I'm trying to test this on an old Sandy Bridge, but found that I get
>>> spammed with warnings on boot. I've put a sample of a few of them below.
>>> They all seem to be related to ioat.
>>>
>>> I had the same issue with Tom's v2 but never saw this on his v1.
>>
>> Have you verified whether this could be reproduced with the latest
>> upstream kernel (without this patch series)?
> 
> Yes.

I am sorry. Just want to make sure I understand you correctly. :-) When
you say "yes", do you mean you could reproduce this with pure upstream
kernel (5.9-rc6)?

> Also, it's hitting a warning in the dma-iommu code which would not
> be hit without this patch set.

Without this series, DMA APIs don't go through dma-iommu. Do you mind
posting the warning message?

Best regards,
baolu
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api
  2020-09-18 20:47 ` [Intel-gfx] " Logan Gunthorpe
  2020-09-20  6:36   ` Lu Baolu
@ 2020-09-22  9:51   ` Robin Murphy
  2020-09-22 18:45     ` Logan Gunthorpe
  1 sibling, 1 reply; 20+ messages in thread
From: Robin Murphy @ 2020-09-22  9:51 UTC (permalink / raw)
  To: Logan Gunthorpe, Lu Baolu, Joerg Roedel, Tom Murphy,
	David Woodhouse, Christoph Hellwig
  Cc: Intel-gfx, Ashok Raj, iommu, linux-kernel

On 2020-09-18 21:47, Logan Gunthorpe wrote:
> Hi Lu,
> 
> On 2020-09-11 9:21 p.m., Lu Baolu wrote:
>> Tom Murphy has almost done all the work. His latest patch series was
>> posted here.
>>
>> https://lore.kernel.org/linux-iommu/20200903201839.7327-1-murphyt7@tcd.ie/
>>
>> Thanks a lot!
>>
>> This series is a follow-up with below changes:
>>
>> 1. Add a quirk for the i915 driver issue described in Tom's cover
>> letter.
>> 2. Fix several bugs in patch "iommu: Allow the dma-iommu api to use
>> bounce buffers" to make the bounce buffer work for untrusted devices.
>> 3. Several cleanups in iommu/vt-d driver after the conversion.
>>
> 
> I'm trying to test this on an old Sandy Bridge, but found that I get
> spammed with warnings on boot. I've put a sample of a few of them below.
> They all seem to be related to ioat.
> 
> I had the same issue with Tom's v2 but never saw this on his v1.

I think this might have more to do with ioat being totally broken - 
AFAICS it appears to allocate descriptors with a size of 2MB, but free 
them with a size of 512KB. Try throwing CONFIG_DMA_API_DEBUG at it to 
confirm.

Robin.

> 
> Thanks,
> 
> Logan
> 
> 
> [   44.057877] ------------[ cut here ]------------
> [   44.063167] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
> [   44.073351] Modules linked in:
> [   44.076882] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
> [   44.087935] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
> [   44.096650] Workqueue: events work_for_cpu_fn
> [   44.101644] RIP: 0010:__iommu_dma_unmap+0x115/0x130
> [   44.107225] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
> [   44.128443] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
> [   44.134413] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
> [   44.144487] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
> [   44.152613] RBP: 00000000fec00000 R08: 00000000000fec00 R09: 00000000000fedff
> [   44.160733] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c39000
> [   44.168861] R13: ffff888472d85ee8 R14: 0000000000080000 R15: 0000000000000000
> [   44.176980] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
> [   44.186198] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [   44.192761] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
> [   44.200871] Call Trace:
> [   44.203716]  ? lockdep_hardirqs_on_prepare+0xad/0x180
> [   44.209509]  iommu_dma_free+0x18/0x30
> [   44.213734]  ioat_free_chan_resources+0x19e/0x300
> [   44.219133]  ioat_dma_self_test+0x2a0/0x3d0
> [   44.223964]  ioat_pci_probe+0x60e/0x903
> [   44.228387]  local_pci_probe+0x42/0x80
> [   44.232721]  work_for_cpu_fn+0x16/0x20
> [   44.237037]  process_one_work+0x292/0x630
> [   44.241644]  ? __schedule+0x344/0x970
> [   44.245868]  worker_thread+0x227/0x3e0
> [   44.250185]  ? process_one_work+0x630/0x630
> [   44.254989]  kthread+0x136/0x150
> [   44.258718]  ? kthread_use_mm+0x100/0x100
> [   44.263334]  ret_from_fork+0x22/0x30
> [   44.267474] irq event stamp: 1881262
> [   44.271602] hardirqs last  enabled at (1881272): [<ffffffff811b8465>] console_unlock+0x435/0x570
> [   44.281593] hardirqs last disabled at (1881281): [<ffffffff811b845b>] console_unlock+0x42b/0x570
> [   44.291588] softirqs last  enabled at (1747140): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
> [   44.301285] softirqs last disabled at (1747144): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
> [   44.312153] ---[ end trace ed0f88fd959a5a39 ]---
> [   44.353963] ------------[ cut here ]------------
> [   44.359291] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
> [   44.369482] Modules linked in:
> [   44.373030] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
> [   44.384097] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
> [   44.392825] Workqueue: events work_for_cpu_fn
> [   44.397831] RIP: 0010:__iommu_dma_unmap+0x115/0x130
> [   44.403421] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
> [   44.424644] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
> [   44.430627] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
> [   44.438770] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
> [   44.446885] RBP: 00000000ffc00000 R08: 00000000000ffc00 R09: 00000000000ffdff
> [   44.455000] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
> [   44.463119] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
> [   44.471235] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
> [   44.480440] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [   44.487004] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
> [   44.495128] Call Trace:
> [   44.498008]  iommu_dma_free+0x18/0x30
> [   44.502230]  ioat_free_chan_resources+0x19e/0x300
> [   44.507631]  ioat_dma_self_test+0x2a0/0x3d0
> [   44.512466]  ioat_pci_probe+0x60e/0x903
> [   44.516889]  local_pci_probe+0x42/0x80
> [   44.521217]  work_for_cpu_fn+0x16/0x20
> [   44.525540]  process_one_work+0x292/0x630
> [   44.530157]  ? __schedule+0x344/0x970
> [   44.534393]  worker_thread+0x227/0x3e0
> [   44.538720]  ? process_one_work+0x630/0x630
> [   44.543531]  kthread+0x136/0x150
> [   44.547270]  ? kthread_use_mm+0x100/0x100
> [   44.551889]  ret_from_fork+0x22/0x30
> [   44.556034] irq event stamp: 2145654
> [   44.560161] hardirqs last  enabled at (2145664): [<ffffffff811b8465>] console_unlock+0x435/0x570
> [   44.570158] hardirqs last disabled at (2145673): [<ffffffff811b845b>] console_unlock+0x42b/0x570
> [   44.580153] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
> [   44.589854] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
> [   44.600729] ---[ end trace ed0f88fd959a5a3a ]---
> [   44.606043] ------------[ cut here ]------------
> [   44.611331] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
> [   44.621524] Modules linked in:
> [   44.625050] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
> [   44.636119] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
> [   44.644834] Workqueue: events work_for_cpu_fn
> [   44.649827] RIP: 0010:__iommu_dma_unmap+0x115/0x130
> [   44.655410] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
> [   44.676618] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
> [   44.682588] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
> [   44.690702] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
> [   44.698816] RBP: 00000000ffa00000 R08: 00000000000ffa00 R09: 00000000000ffbff
> [   44.706934] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
> [   44.715054] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
> [   44.723174] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
> [   44.732384] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [   44.738941] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
> [   44.747060] Call Trace:
> [   44.749921]  ? lockdep_hardirqs_on_prepare+0xad/0x180
> [   44.755706]  iommu_dma_free+0x18/0x30
> [   44.759931]  ioat_free_chan_resources+0x19e/0x300
> [   44.765332]  ioat_dma_self_test+0x2a0/0x3d0
> [   44.770156]  ioat_pci_probe+0x60e/0x903
> [   44.774583]  local_pci_probe+0x42/0x80
> [   44.778909]  work_for_cpu_fn+0x16/0x20
> [   44.783232]  process_one_work+0x292/0x630
> [   44.787846]  ? __schedule+0x344/0x970
> [   44.792079]  worker_thread+0x227/0x3e0
> [   44.796403]  ? process_one_work+0x630/0x630
> [   44.801205]  kthread+0x136/0x150
> [   44.804946]  ? kthread_use_mm+0x100/0x100
> [   44.809563]  ret_from_fork+0x22/0x30
> [   44.813704] irq event stamp: 2146044
> [   44.817831] hardirqs last  enabled at (2146054): [<ffffffff811b8465>] console_unlock+0x435/0x570
> [   44.827821] hardirqs last disabled at (2146063): [<ffffffff811b845b>] console_unlock+0x42b/0x570
> [   44.837810] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
> [   44.847503] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
> [   44.858366] ---[ end trace ed0f88fd959a5a3b ]---
> [   44.863675] ------------[ cut here ]------------
> [   44.868969] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
> [   44.879148] Modules linked in:
> [   44.882689] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
> [   44.893760] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
> [   44.902487] Workqueue: events work_for_cpu_fn
> [   44.907493] RIP: 0010:__iommu_dma_unmap+0x115/0x130
> [   44.913080] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
> [   44.934290] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
> [   44.940254] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
> [   44.948379] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
> [   44.956505] RBP: 00000000ff800000 R08: 00000000000ff800 R09: 00000000000ff9ff
> [   44.964627] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
> [   44.972748] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
> [   44.980871] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
> [   44.990084] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [   44.996644] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
> [   45.004759] Call Trace:
> [   45.007600]  ? lockdep_hardirqs_on_prepare+0xad/0x180
> [   45.013391]  iommu_dma_free+0x18/0x30
> [   45.017609]  ioat_free_chan_resources+0x19e/0x300
> [   45.022992]  ioat_dma_self_test+0x2a0/0x3d0
> [   45.027809]  ioat_pci_probe+0x60e/0x903
> [   45.032237]  local_pci_probe+0x42/0x80
> [   45.036563]  work_for_cpu_fn+0x16/0x20
> [   45.040886]  process_one_work+0x292/0x630
> [   45.045500]  ? __schedule+0x344/0x970
> [   45.049735]  worker_thread+0x227/0x3e0
> [   45.054061]  ? process_one_work+0x630/0x630
> [   45.058864]  kthread+0x136/0x150
> [   45.062597]  ? kthread_use_mm+0x100/0x100
> [   45.067200]  ret_from_fork+0x22/0x30
> [   45.071335] irq event stamp: 2146432
> [   45.075458] hardirqs last  enabled at (2146442): [<ffffffff811b8465>] console_unlock+0x435/0x570
> [   45.085440] hardirqs last disabled at (2146451): [<ffffffff811b845b>] console_unlock+0x42b/0x570
> [   45.095432] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
> [   45.105134] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
> [   45.116004] ---[ end trace ed0f88fd959a5a3c ]---
> [   45.121305] ------------[ cut here ]------------
> [   45.126596] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
> [   45.136772] Modules linked in:
> [   45.142288] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
> [   45.153351] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
> [   45.162077] Workqueue: events work_for_cpu_fn
> [   45.167074] RIP: 0010:__iommu_dma_unmap+0x115/0x130
> [   45.172663] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
> [   45.193874] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
> [   45.199850] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
> [   45.207970] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
> [   45.216095] RBP: 00000000ff600000 R08: 00000000000ff600 R09: 00000000000ff7ff
> [   45.224207] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
> [   45.232327] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
> [   45.240446] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
> [   45.249662] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [   45.256225] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
> [   45.264344] Call Trace:
> [   45.267205]  ? lockdep_hardirqs_on_prepare+0xad/0x180
> [   45.273003]  iommu_dma_free+0x18/0x30
> [   45.277231]  ioat_free_chan_resources+0x19e/0x300
> [   45.282631]  ioat_dma_self_test+0x2a0/0x3d0
> [   45.287457]  ioat_pci_probe+0x60e/0x903
> [   45.291894]  local_pci_probe+0x42/0x80
> [   45.296217]  work_for_cpu_fn+0x16/0x20
> [   45.300539]  process_one_work+0x292/0x630
> [   45.305162]  ? __schedule+0x344/0x970
> [   45.309399]  worker_thread+0x227/0x3e0
> [   45.313724]  ? process_one_work+0x630/0x630
> [   45.318535]  kthread+0x136/0x150
> [   45.322274]  ? kthread_use_mm+0x100/0x100
> [   45.326893]  ret_from_fork+0x22/0x30
> [   45.331034] irq event stamp: 2146822
> [   45.335161] hardirqs last  enabled at (2146832): [<ffffffff811b8465>] console_unlock+0x435/0x570
> [   45.345154] hardirqs last disabled at (2146841): [<ffffffff811b845b>] console_unlock+0x42b/0x570
> [   45.355150] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
> [   45.364850] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
> [   45.375710] ---[ end trace ed0f88fd959a5a3d ]---
> [   45.381018] ------------[ cut here ]------------
> [   45.386308] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
> [   45.396494] Modules linked in:
> [   45.400020] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
> [   45.411086] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
> [   45.419810] Workqueue: events work_for_cpu_fn
> [   45.424819] RIP: 0010:__iommu_dma_unmap+0x115/0x130
> [   45.430406] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
> [   45.451630] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
> [   45.457610] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
> [   45.465730] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
> [   45.473853] RBP: 00000000ff400000 R08: 00000000000ff400 R09: 00000000000ff5ff
> [   45.481973] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
> [   45.490092] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
> [   45.498211] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
> [   45.507418] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [   45.513969] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
> [   45.522081] Call Trace:
> [   45.524934]  ? lockdep_hardirqs_on_prepare+0xad/0x180
> [   45.530718]  iommu_dma_free+0x18/0x30
> [   45.534989]  ioat_free_chan_resources+0x19e/0x300
> [   45.540378]  ioat_dma_self_test+0x2a0/0x3d0
> [   45.545193]  ioat_pci_probe+0x60e/0x903
> [   45.549619]  local_pci_probe+0x42/0x80
> [   45.553940]  work_for_cpu_fn+0x16/0x20
> [   45.558258]  process_one_work+0x292/0x630
> [   45.562871]  ? __schedule+0x344/0x970
> [   45.567108]  worker_thread+0x227/0x3e0
> [   45.571434]  ? process_one_work+0x630/0x630
> [   45.576243]  kthread+0x136/0x150
> [   45.579981]  ? kthread_use_mm+0x100/0x100
> [   45.584598]  ret_from_fork+0x22/0x30
> [   45.588733] irq event stamp: 2147210
> [   45.592860] hardirqs last  enabled at (2147220): [<ffffffff811b8465>] console_unlock+0x435/0x570
> [   45.602845] hardirqs last disabled at (2147229): [<ffffffff811b845b>] console_unlock+0x42b/0x570
> [   45.612836] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
> [   45.622531] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
> [   45.633392] ---[ end trace ed0f88fd959a5a3e ]---
> [   45.638708] ------------[ cut here ]------------
> [   45.644003] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
> [   45.654191] Modules linked in:
> [   45.657734] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
> [   45.668807] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
> [   45.677534] Workqueue: events work_for_cpu_fn
> [   45.682538] RIP: 0010:__iommu_dma_unmap+0x115/0x130
> [   45.688126] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
> [   45.709343] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
> [   45.715321] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
> [   45.723440] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
> [   45.731555] RBP: 00000000ff200000 R08: 00000000000ff200 R09: 00000000000ff3ff
> [   45.739665] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
> [   45.747777] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
> [   45.755897] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
> [   45.765106] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [   45.771661] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
> [   45.779778] Call Trace:
> [   45.782638]  ? lockdep_hardirqs_on_prepare+0xad/0x180
> [   45.788439]  iommu_dma_free+0x18/0x30
> [   45.792664]  ioat_free_chan_resources+0x19e/0x300
> [   45.798060]  ioat_dma_self_test+0x2a0/0x3d0
> [   45.802886]  ioat_pci_probe+0x60e/0x903
> [   45.807306]  local_pci_probe+0x42/0x80
> [   45.811633]  work_for_cpu_fn+0x16/0x20
> [   45.815946]  process_one_work+0x292/0x630
> [   45.820556]  ? __schedule+0x344/0x970
> [   45.824791]  worker_thread+0x227/0x3e0
> [   45.829117]  ? process_one_work+0x630/0x630
> [   45.833969]  kthread+0x136/0x150
> [   45.837706]  ? kthread_use_mm+0x100/0x100
> [   45.842325]  ret_from_fork+0x22/0x30
> [   45.846475] irq event stamp: 2147602
> [   45.850608] hardirqs last  enabled at (2147612): [<ffffffff811b8465>] console_unlock+0x435/0x570
> [   45.860603] hardirqs last disabled at (2147621): [<ffffffff811b845b>] console_unlock+0x42b/0x570
> [   45.870589] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
> [   45.880293] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
> [   45.891163] ---[ end trace ed0f88fd959a5a3f ]---
> [   45.896473] ------------[ cut here ]------------
> [   45.901762] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
> [   45.911956] Modules linked in:
> [   45.915503] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
> [   45.926570] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
> [   45.935302] Workqueue: events work_for_cpu_fn
> [   45.940312] RIP: 0010:__iommu_dma_unmap+0x115/0x130
> [   45.945905] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
> [   45.967128] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
> [   45.973111] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
> [   45.981236] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
> [   45.989363] RBP: 00000000ff000000 R08: 00000000000ff000 R09: 00000000000ff1ff
> [   45.997489] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
> [   46.005613] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
> [   46.013738] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
> [   46.022952] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [   46.029521] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
> [   46.037643] Call Trace:
> [   46.040508]  ? lockdep_hardirqs_on_prepare+0xad/0x180
> [   46.046305]  iommu_dma_free+0x18/0x30
> [   46.050538]  ioat_free_chan_resources+0x19e/0x300
> [   46.055941]  ioat_dma_self_test+0x2a0/0x3d0
> [   46.060765]  ioat_pci_probe+0x60e/0x903
> [   46.065196]  local_pci_probe+0x42/0x80
> [   46.069526]  work_for_cpu_fn+0x16/0x20
> [   46.073854]  process_one_work+0x292/0x630
> [   46.078477]  ? __schedule+0x344/0x970
> [   46.082717]  worker_thread+0x227/0x3e0
> [   46.087035]  ? process_one_work+0x630/0x630
> [   46.091849]  kthread+0x136/0x150
> [   46.095594]  ? kthread_use_mm+0x100/0x100
> [   46.100218]  ret_from_fork+0x22/0x30
> [   46.104366] irq event stamp: 2147992
> [   46.108499] hardirqs last  enabled at (2148002): [<ffffffff811b8465>] console_unlock+0x435/0x570
> [   46.120483] hardirqs last disabled at (2148011): [<ffffffff811b845b>] console_unlock+0x42b/0x570
> [   46.130472] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
> [   46.140171] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
> [   46.151034] ---[ end trace ed0f88fd959a5a40 ]---
> [   46.156339] ------------[ cut here ]------------
> [   46.161628] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496 __iommu_dma_unmap+0x115/0x130
> [   46.171817] Modules linked in:
> [   46.175365] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W         5.9.0-rc4-00006-g110da1e703a2 #216
> [   46.186430] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS 3.0a 12/05/2013
> [   46.195163] Workqueue: events work_for_cpu_fn
> [   46.200173] RIP: 0010:__iommu_dma_unmap+0x115/0x130
> [   46.205767] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24 18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb 8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
> [   46.226991] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
> [   46.232976] RAX: 0000000000200000 RBX: 0000000000080000 RCX: 0000000000000000
> [   46.241101] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI: ffff888477804f30
> [   46.249226] RBP: 00000000fec00000 R08: 00000000000fec00 R09: 00000000000fedff
> [   46.257352] R10: 0000000000000002 R11: 0000000000000004 R12: ffff888270c55000
> [   46.265474] R13: ffff888472dc7268 R14: 0000000000080000 R15: 0000000000000000
> [   46.273602] FS:  0000000000000000(0000) GS:ffff888479800000(0000) knlGS:0000000000000000
> [   46.282822] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [   46.289388] CR2: 0000000000000000 CR3: 0000000002a20001 CR4: 00000000000606e0
> [   46.297513] Call Trace:
> [   46.300358]  ? lockdep_hardirqs_on_prepare+0xad/0x180
> [   46.306152]  iommu_dma_free+0x18/0x30
> [   46.310379]  ioat_free_chan_resources+0x19e/0x300
> [   46.315776]  ioat_dma_self_test+0x2a0/0x3d0
> [   46.320607]  ioat_pci_probe+0x60e/0x903
> [   46.325044]  local_pci_probe+0x42/0x80
> [   46.329373]  work_for_cpu_fn+0x16/0x20
> [   46.333699]  process_one_work+0x292/0x630
> [   46.338321]  ? __schedule+0x344/0x970
> [   46.342563]  worker_thread+0x227/0x3e0
> [   46.346891]  ? process_one_work+0x630/0x630
> [   46.351704]  kthread+0x136/0x150
> [   46.355449]  ? kthread_use_mm+0x100/0x100
> [   46.360069]  ret_from_fork+0x22/0x30
> [   46.364204] irq event stamp: 2148380
> [   46.368338] hardirqs last  enabled at (2148390): [<ffffffff811b8465>] console_unlock+0x435/0x570
> [   46.378336] hardirqs last disabled at (2148399): [<ffffffff811b845b>] console_unlock+0x42b/0x570
> [   46.388335] softirqs last  enabled at (2014254): [<ffffffff817da3e5>] ioat_cleanup+0x65/0x470
> [   46.398040] softirqs last disabled at (2014258): [<ffffffff817d797a>] ioat_free_chan_resources+0x6a/0x300
> [   46.408914] ---[ end trace ed0f88fd959a5a41 ]---
> _______________________________________________
> iommu mailing list
> iommu@lists.linux-foundation.org
> https://lists.linuxfoundation.org/mailman/listinfo/iommu
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api
  2020-09-15  8:31     ` Tvrtko Ursulin
@ 2020-09-22 11:05       ` Robin Murphy
  2020-09-23  5:38         ` Lu Baolu
  2020-09-24  2:35       ` Lu Baolu
  1 sibling, 1 reply; 20+ messages in thread
From: Robin Murphy @ 2020-09-22 11:05 UTC (permalink / raw)
  To: Tvrtko Ursulin, Lu Baolu, Joerg Roedel, Tom Murphy,
	David Woodhouse, Christoph Hellwig
  Cc: Intel-gfx, Ashok Raj, iommu, linux-kernel

On 2020-09-15 09:31, Tvrtko Ursulin wrote:
> 
> On 15/09/2020 02:47, Lu Baolu wrote:
>> Hi Tvrtko,
>>
>> On 9/14/20 4:04 PM, Tvrtko Ursulin wrote:
>>>
>>> Hi,
>>>
>>> On 12/09/2020 04:21, Lu Baolu wrote:
>>>> Tom Murphy has almost done all the work. His latest patch series was
>>>> posted here.
>>>>
>>>> https://lore.kernel.org/linux-iommu/20200903201839.7327-1-murphyt7@tcd.ie/ 
>>>>
>>>>
>>>> Thanks a lot!
>>>>
>>>> This series is a follow-up with below changes:
>>>>
>>>> 1. Add a quirk for the i915 driver issue described in Tom's cover
>>>> letter.
>>>
>>> Last week I copied you on an i915 series which appears to remove 
>>> the need for this quirk. So if we get those i915 patches reviewed and 
>>> merged, do you still want to pursue this quirk?
>>
>> It's up to the graphic guys. I don't know the details in i915 driver.
>> I don't think my tests could cover all cases.
> 
> I am the graphic guy. :) I just need some reviews (internally) for my 
> series and then we can merge it, at which point you don't need the quirk 
> patch any more. I'll try to accelerate this.
> 
> With regards to testing, you could send your series with my patches on 
> top to our trybot mailing list (intel-gfx-trybot@lists.freedesktop.org / 
> https://patchwork.freedesktop.org/project/intel-gfx-trybot/series/?ordering=-last_updated) 
> which would show you if it is still hitting the DMAR issues in i915.
> 
>>>
>>>> 2. Fix several bugs in patch "iommu: Allow the dma-iommu api to use
>>>> bounce buffers" to make the bounce buffer work for untrusted devices.
>>>> 3. Several cleanups in iommu/vt-d driver after the conversion.
>>>
>>> With the previous version of the series I hit a problem on Ivybridge 
>>> where apparently the dma engine width is not respected. At least that 
>>> is my layman interpretation of the errors. From the older thread:
>>>
>>> <3> [209.526605] DMAR: intel_iommu_map: iommu width (39) is not 
>>> sufficient for the mapped address (ffff008000)
>>>
>>> Relevant iommu boot related messages are:
>>>
>>> <6>[    0.184234] DMAR: Host address width 36
>>> <6>[    0.184245] DMAR: DRHD base: 0x000000fed90000 flags: 0x0
>>> <6>[    0.184288] DMAR: dmar0: reg_base_addr fed90000 ver 1:0 cap 
>>> c0000020e60262 ecap f0101a
>>> <6>[    0.184308] DMAR: DRHD base: 0x000000fed91000 flags: 0x1
>>> <6>[    0.184337] DMAR: dmar1: reg_base_addr fed91000 ver 1:0 cap 
>>> c9008020660262 ecap f0105a
>>> <6>[    0.184357] DMAR: RMRR base: 0x000000d8d28000 end: 
>>> 0x000000d8d46fff
>>> <6>[    0.184377] DMAR: RMRR base: 0x000000db000000 end: 
>>> 0x000000df1fffff
>>> <6>[    0.184398] DMAR-IR: IOAPIC id 2 under DRHD base  0xfed91000 
>>> IOMMU 1
>>> <6>[    0.184414] DMAR-IR: HPET id 0 under DRHD base 0xfed91000
>>> <6>[    0.184428] DMAR-IR: Queued invalidation will be enabled to 
>>> support x2apic and Intr-remapping.
>>> <6>[    0.185173] DMAR-IR: Enabled IRQ remapping in x2apic mode
>>>
>>> <6>[    0.878934] DMAR: No ATSR found
>>> <6>[    0.878966] DMAR: dmar0: Using Queued invalidation
>>> <6>[    0.879007] DMAR: dmar1: Using Queued invalidation
>>>
>>> <6>[    0.915032] DMAR: Intel(R) Virtualization Technology for 
>>> Directed I/O
>>> <6>[    0.915060] PCI-DMA: Using software bounce buffering for IO 
>>> (SWIOTLB)
>>> <6>[    0.915084] software IO TLB: mapped [mem 0xc80d4000-0xcc0d4000] 
>>> (64MB)
>>>
>>> (Full boot log at 
>>> https://intel-gfx-ci.01.org/tree/drm-tip/Trybot_7054/fi-ivb-3770/boot0.txt, 
>>> failures at 
>>> https://intel-gfx-ci.01.org/tree/drm-tip/Trybot_7054/fi-ivb-3770/igt@i915_selftest@live@blt.html.) 
>>>
>>>
>>> Does this look familiar or at least plausible to you? Is this 
>>> something your new series has fixed?
>>
>> This happens during attaching a domain to device. It has nothing to do
>> with this patch series. I will look into this issue, but not in this
>> email thread context.
> 
> I am not sure what step is attaching domain to device, but these type 
> messages:
> 
> <3> [209.526605] DMAR: intel_iommu_map: iommu width (39) is not
>  >> sufficient for the mapped address (ffff008000)
> 
> They definitely appear to happen at runtime, as i915 is getting 
> exercised by userspace.

AFAICS this certainly might be related to this series - iommu-dma will 
constrain IOVA allocation based on the domain geometry that the driver 
reports, which in this case is set only once when first allocating the 
domain. Thus it looks like both the dmar_domain->gaw adjustment in 
prepare_domain_attach_device() and the domain_use_first_level() business 
in intel_alloc_iova() effectively get lost in this conversion, since the 
domain geometry never gets updated to reflect those additional constraints.

Robin.

> 
> Regards,
> 
> Tvrtko
> _______________________________________________
> iommu mailing list
> iommu@lists.linux-foundation.org
> https://lists.linuxfoundation.org/mailman/listinfo/iommu
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api
  2020-09-22  0:24       ` Lu Baolu
@ 2020-09-22 15:38         ` Logan Gunthorpe
  0 siblings, 0 replies; 20+ messages in thread
From: Logan Gunthorpe @ 2020-09-22 15:38 UTC (permalink / raw)
  To: Lu Baolu, Joerg Roedel, Tom Murphy, David Woodhouse, Christoph Hellwig
  Cc: Intel-gfx, Ashok Raj, iommu, linux-kernel



On 2020-09-21 6:24 p.m., Lu Baolu wrote:
>>>> I'm trying to test this on an old Sandy Bridge, but found that I get
>>>> spammed with warnings on boot. I've put a sample of a few of them below.
>>>> They all seem to be related to ioat.
>>>>
>>>> I had the same issue with Tom's v2 but never saw this on his v1.
>>>
>>> Have you verified whether this could be reproduced with the lasted
>>> upstream kernel (without this patch series)?
>>
>> Yes.
> 
> I am sorry. Just want to make sure I understand you correctly. :-) When
> you say "yes", do you mean you could reproduce this with pure upstream
> kernel (5.9-rc6)?

I mean I've verified the bug does *not* exist without this patch set.

> 
>> Also, it's hitting a warning in the dma-iommu code which would not
>> be hit without this patch set.
> 
> Without this series, DMA APIs don't go through dma-iommu. Do you mind
> posting the warning message?

It was on my original email but here it is again:


[   44.057877] ------------[ cut here ]------------
[   44.063167] WARNING: CPU: 4 PID: 84 at drivers/iommu/dma-iommu.c:496
__iommu_dma_unmap+0x115/0x130
[   44.073351] Modules linked in:
[   44.076882] CPU: 4 PID: 84 Comm: kworker/4:1 Tainted: G        W
   5.9.0-rc4-00006-g110da1e703a2 #216
[   44.087935] Hardware name: Supermicro SYS-7047GR-TRF/X9DRG-QF, BIOS
3.0a 12/05/2013
[   44.096650] Workqueue: events work_for_cpu_fn
[   44.101644] RIP: 0010:__iommu_dma_unmap+0x115/0x130
[   44.107225] Code: 48 8b 0c 24 48 c7 44 24 10 00 00 00 00 48 c7 44 24
18 00 00 00 00 48 c7 44 24 20 00 00 00 00 48 c7 44 24 08 ff ff ff ff eb
8d <0f> 0b e9 74 ff ff ff e8 1f 36 66 00 66 66 2e 0f 1f 84 00 00 00 00
[   44.128443] RSP: 0000:ffffc90002397c38 EFLAGS: 00010206
[   44.134413] RAX: 0000000000200000 RBX: 0000000000080000 RCX:
0000000000000000
[   44.144487] RDX: 0000000000000403 RSI: ffffffff82a7fb20 RDI:
ffff888477804f30
[   44.152613] RBP: 00000000fec00000 R08: 00000000000fec00 R09:
00000000000fedff
[   44.160733] R10: 0000000000000002 R11: 0000000000000004 R12:
ffff888270c39000
[   44.168861] R13: ffff888472d85ee8 R14: 0000000000080000 R15:
0000000000000000
[   44.176980] FS:  0000000000000000(0000) GS:ffff888479800000(0000)
knlGS:0000000000000000
[   44.186198] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   44.192761] CR2: 0000000000000000 CR3: 0000000002a20001 CR4:
00000000000606e0
[   44.200871] Call Trace:
[   44.203716]  ? lockdep_hardirqs_on_prepare+0xad/0x180
[   44.209509]  iommu_dma_free+0x18/0x30
[   44.213734]  ioat_free_chan_resources+0x19e/0x300
[   44.219133]  ioat_dma_self_test+0x2a0/0x3d0
[   44.223964]  ioat_pci_probe+0x60e/0x903
[   44.228387]  local_pci_probe+0x42/0x80
[   44.232721]  work_for_cpu_fn+0x16/0x20
[   44.237037]  process_one_work+0x292/0x630
[   44.241644]  ? __schedule+0x344/0x970
[   44.245868]  worker_thread+0x227/0x3e0
[   44.250185]  ? process_one_work+0x630/0x630
[   44.254989]  kthread+0x136/0x150
[   44.258718]  ? kthread_use_mm+0x100/0x100
[   44.263334]  ret_from_fork+0x22/0x30
[   44.267474] irq event stamp: 1881262
[   44.271602] hardirqs last  enabled at (1881272): [<ffffffff811b8465>]
console_unlock+0x435/0x570
[   44.281593] hardirqs last disabled at (1881281): [<ffffffff811b845b>]
console_unlock+0x42b/0x570
[   44.291588] softirqs last  enabled at (1747140): [<ffffffff817da3e5>]
ioat_cleanup+0x65/0x470
[   44.301285] softirqs last disabled at (1747144): [<ffffffff817d797a>]
ioat_free_chan_resources+0x6a/0x300
[   44.312153] ---[ end trace ed0f88fd959a5a39 ]---
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api
  2020-09-22  9:51   ` Robin Murphy
@ 2020-09-22 18:45     ` Logan Gunthorpe
  0 siblings, 0 replies; 20+ messages in thread
From: Logan Gunthorpe @ 2020-09-22 18:45 UTC (permalink / raw)
  To: Robin Murphy, Lu Baolu, Joerg Roedel, Tom Murphy,
	David Woodhouse, Christoph Hellwig
  Cc: Intel-gfx, Ashok Raj, iommu, linux-kernel



On 2020-09-22 3:51 a.m., Robin Murphy wrote:
> On 2020-09-18 21:47, Logan Gunthorpe wrote:
>> Hi Lu,
>>
>> On 2020-09-11 9:21 p.m., Lu Baolu wrote:
>>> Tom Murphy has almost done all the work. His latest patch series was
>>> posted here.
>>>
>>> https://lore.kernel.org/linux-iommu/20200903201839.7327-1-murphyt7@tcd.ie/
>>>
>>> Thanks a lot!
>>>
>>> This series is a follow-up with below changes:
>>>
>>> 1. Add a quirk for the i915 driver issue described in Tom's cover
>>> letter.
>>> 2. Fix several bugs in patch "iommu: Allow the dma-iommu api to use
>>> bounce buffers" to make the bounce buffer work for untrusted devices.
>>> 3. Several cleanups in iommu/vt-d driver after the conversion.
>>>
>>
>> I'm trying to test this on an old Sandy Bridge, but found that I get
>> spammed with warnings on boot. I've put a sample of a few of them below.
>> They all seem to be related to ioat.
>>
>> I had the same issue with Tom's v2 but never saw this on his v1.
> 
> I think this might have more to do with ioat being totally broken - 
> AFAICS it appears to allocate descriptors with a size of 2MB, but free 
> them with a size of 512KB. Try throwing CONFIG_DMA_API_DEBUG at it to 
> confirm.

Ah, yes, nice catch. Looks like it was broken recently by the following
commit, but nobody noticed and the dma-iommu patch set added a warning
which caught it.

a02254f8a676 ("dmaengine: ioat: Decreasing allocation chunk size 2M->512K")

Reverting that fixes the issue. I'll try to send a patch or two for this.

Logan

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api
  2020-09-22 11:05       ` Robin Murphy
@ 2020-09-23  5:38         ` Lu Baolu
  0 siblings, 0 replies; 20+ messages in thread
From: Lu Baolu @ 2020-09-23  5:38 UTC (permalink / raw)
  To: Robin Murphy, Tvrtko Ursulin, Joerg Roedel, Tom Murphy,
	David Woodhouse, Christoph Hellwig
  Cc: linux-kernel, Intel-gfx, Ashok Raj, iommu

On 9/22/20 7:05 PM, Robin Murphy wrote:
>>>> With the previous version of the series I hit a problem on Ivybridge 
>>>> where apparently the dma engine width is not respected. At least 
>>>> that is my layman interpretation of the errors. From the older thread:
>>>>
>>>> <3> [209.526605] DMAR: intel_iommu_map: iommu width (39) is not 
>>>> sufficient for the mapped address (ffff008000)
>>>>
>>>> Relevant iommu boot related messages are:
>>>>
>>>> <6>[    0.184234] DMAR: Host address width 36
>>>> <6>[    0.184245] DMAR: DRHD base: 0x000000fed90000 flags: 0x0
>>>> <6>[    0.184288] DMAR: dmar0: reg_base_addr fed90000 ver 1:0 cap 
>>>> c0000020e60262 ecap f0101a
>>>> <6>[    0.184308] DMAR: DRHD base: 0x000000fed91000 flags: 0x1
>>>> <6>[    0.184337] DMAR: dmar1: reg_base_addr fed91000 ver 1:0 cap 
>>>> c9008020660262 ecap f0105a
>>>> <6>[    0.184357] DMAR: RMRR base: 0x000000d8d28000 end: 
>>>> 0x000000d8d46fff
>>>> <6>[    0.184377] DMAR: RMRR base: 0x000000db000000 end: 
>>>> 0x000000df1fffff
>>>> <6>[    0.184398] DMAR-IR: IOAPIC id 2 under DRHD base  0xfed91000 
>>>> IOMMU 1
>>>> <6>[    0.184414] DMAR-IR: HPET id 0 under DRHD base 0xfed91000
>>>> <6>[    0.184428] DMAR-IR: Queued invalidation will be enabled to 
>>>> support x2apic and Intr-remapping.
>>>> <6>[    0.185173] DMAR-IR: Enabled IRQ remapping in x2apic mode
>>>>
>>>> <6>[    0.878934] DMAR: No ATSR found
>>>> <6>[    0.878966] DMAR: dmar0: Using Queued invalidation
>>>> <6>[    0.879007] DMAR: dmar1: Using Queued invalidation
>>>>
>>>> <6>[    0.915032] DMAR: Intel(R) Virtualization Technology for 
>>>> Directed I/O
>>>> <6>[    0.915060] PCI-DMA: Using software bounce buffering for IO 
>>>> (SWIOTLB)
>>>> <6>[    0.915084] software IO TLB: mapped [mem 
>>>> 0xc80d4000-0xcc0d4000] (64MB)
>>>>
>>>> (Full boot log at 
>>>> https://intel-gfx-ci.01.org/tree/drm-tip/Trybot_7054/fi-ivb-3770/boot0.txt, 
>>>> failures at 
>>>> https://intel-gfx-ci.01.org/tree/drm-tip/Trybot_7054/fi-ivb-3770/igt@i915_selftest@live@blt.html.) 
>>>>
>>>>
>>>> Does this look familiar or at least plausible to you? Is this 
>>>> something your new series has fixed?
>>>
>>> This happens during attaching a domain to device. It has nothing to do
>>> with this patch series. I will look into this issue, but not in this
>>> email thread context.
>>
>> I am not sure what step is attaching domain to device, but these type 
>> messages:
>>
>> <3> [209.526605] DMAR: intel_iommu_map: iommu width (39) is not
>>  >> sufficient for the mapped address (ffff008000)
>>
>> They definitely appear to happen at runtime, as i915 is getting 
>> exercised by userspace.
> 
> AFAICS this certainly might be related to this series - iommu-dma will 

Oh! I looked at the wrong function. prepare_domain_attach_device()
prints a similar message which made me believe that it was not caused
by the this patches series.

> constrain IOVA allocation based on the domain geometry that the driver 
> reports, which in this case is set only once when first allocating the 
> domain. Thus it looks like both the dmar_domain->gaw adjustment in 
> prepare_domain_attach_device() and the domain_use_first_level() business 
> in intel_alloc_iova() effectively get lost in this conversion, since the 
> domain geometry never gets updated to reflect those additional constraints.

Sounds reasonable. I will look into the code and work out a fix.

> Robin.
> 

Best regards,
baolu
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api
  2020-09-15  8:31     ` Tvrtko Ursulin
  2020-09-22 11:05       ` Robin Murphy
@ 2020-09-24  2:35       ` Lu Baolu
  1 sibling, 0 replies; 20+ messages in thread
From: Lu Baolu @ 2020-09-24  2:35 UTC (permalink / raw)
  To: Tvrtko Ursulin, Joerg Roedel, Tom Murphy, David Woodhouse,
	Christoph Hellwig
  Cc: linux-kernel, Intel-gfx, Ashok Raj, iommu

Hi Tvrtko,

On 9/15/20 4:31 PM, Tvrtko Ursulin wrote:
>>> With the previous version of the series I hit a problem on Ivybridge 
>>> where apparently the dma engine width is not respected. At least that 
>>> is my layman interpretation of the errors. From the older thread:
>>>
>>> <3> [209.526605] DMAR: intel_iommu_map: iommu width (39) is not 
>>> sufficient for the mapped address (ffff008000)
>>>
>>> Relevant iommu boot related messages are:
>>>
>>> <6>[    0.184234] DMAR: Host address width 36
>>> <6>[    0.184245] DMAR: DRHD base: 0x000000fed90000 flags: 0x0
>>> <6>[    0.184288] DMAR: dmar0: reg_base_addr fed90000 ver 1:0 cap 
>>> c0000020e60262 ecap f0101a
>>> <6>[    0.184308] DMAR: DRHD base: 0x000000fed91000 flags: 0x1
>>> <6>[    0.184337] DMAR: dmar1: reg_base_addr fed91000 ver 1:0 cap 
>>> c9008020660262 ecap f0105a
>>> <6>[    0.184357] DMAR: RMRR base: 0x000000d8d28000 end: 
>>> 0x000000d8d46fff
>>> <6>[    0.184377] DMAR: RMRR base: 0x000000db000000 end: 
>>> 0x000000df1fffff
>>> <6>[    0.184398] DMAR-IR: IOAPIC id 2 under DRHD base  0xfed91000 
>>> IOMMU 1
>>> <6>[    0.184414] DMAR-IR: HPET id 0 under DRHD base 0xfed91000
>>> <6>[    0.184428] DMAR-IR: Queued invalidation will be enabled to 
>>> support x2apic and Intr-remapping.
>>> <6>[    0.185173] DMAR-IR: Enabled IRQ remapping in x2apic mode
>>>
>>> <6>[    0.878934] DMAR: No ATSR found
>>> <6>[    0.878966] DMAR: dmar0: Using Queued invalidation
>>> <6>[    0.879007] DMAR: dmar1: Using Queued invalidation
>>>
>>> <6>[    0.915032] DMAR: Intel(R) Virtualization Technology for 
>>> Directed I/O
>>> <6>[    0.915060] PCI-DMA: Using software bounce buffering for IO 
>>> (SWIOTLB)
>>> <6>[    0.915084] software IO TLB: mapped [mem 0xc80d4000-0xcc0d4000] 
>>> (64MB)
>>>
>>> (Full boot log at 
>>> https://intel-gfx-ci.01.org/tree/drm-tip/Trybot_7054/fi-ivb-3770/boot0.txt, 
>>> failures at 
>>> https://intel-gfx-ci.01.org/tree/drm-tip/Trybot_7054/fi-ivb-3770/igt@i915_selftest@live@blt.html.) 
>>>
>>>
>>> Does this look familiar or at least plausible to you? Is this 
>>> something your new series has fixed?
>>
>> This happens during attaching a domain to device. It has nothing to do
>> with this patch series. I will look into this issue, but not in this
>> email thread context.
> 
> I am not sure what step is attaching domain to device, but these type 
> messages:
> 
> <3> [209.526605] DMAR: intel_iommu_map: iommu width (39) is not
>  >> sufficient for the mapped address (ffff008000)
> 
> They definitely appear to happen at runtime, as i915 is getting 
> exercised by userspace.

Can you please check whether below change helps here?

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index c8323a9f8bde..0484c539debc 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -724,6 +724,7 @@ static int domain_update_device_node(struct 
dmar_domain *domain)
  /* Some capabilities may be different across iommus */
  static void domain_update_iommu_cap(struct dmar_domain *domain)
  {
+       domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
         domain_update_iommu_coherency(domain);
         domain->iommu_snooping = domain_update_iommu_snooping(NULL);
         domain->iommu_superpage = domain_update_iommu_superpage(domain, 
NULL);

Best regards,
baolu
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2020-09-24  2:42 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-09-12  3:21 [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api Lu Baolu
2020-09-12  3:21 ` [PATCH v3 1/6] iommu: Handle freelists when using deferred flushing in iommu drivers Lu Baolu
2020-09-12  3:21 ` [PATCH v3 2/6] iommu: Add iommu_dma_free_cpu_cached_iovas() Lu Baolu
2020-09-12  3:21 ` [PATCH v3 3/6] iommu: Allow the dma-iommu api to use bounce buffers Lu Baolu
2020-09-12  3:21 ` [PATCH v3 4/6] iommu: Add quirk for Intel graphic devices in map_sg Lu Baolu
2020-09-12  3:21 ` [PATCH v3 5/6] iommu/vt-d: Convert intel iommu driver to the iommu ops Lu Baolu
2020-09-12  3:22 ` [PATCH v3 6/6] iommu/vt-d: Cleanup after converting to dma-iommu ops Lu Baolu
2020-09-14  8:04 ` [PATCH v3 0/6] Convert the intel iommu driver to the dma-iommu api Tvrtko Ursulin
2020-09-15  1:47   ` Lu Baolu
2020-09-15  8:31     ` Tvrtko Ursulin
2020-09-22 11:05       ` Robin Murphy
2020-09-23  5:38         ` Lu Baolu
2020-09-24  2:35       ` Lu Baolu
2020-09-18 20:47 ` [Intel-gfx] " Logan Gunthorpe
2020-09-20  6:36   ` Lu Baolu
2020-09-21 15:48     ` Logan Gunthorpe
2020-09-22  0:24       ` Lu Baolu
2020-09-22 15:38         ` Logan Gunthorpe
2020-09-22  9:51   ` Robin Murphy
2020-09-22 18:45     ` Logan Gunthorpe

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).