From: Tom Murphy <murphyt7@tcd.ie>
To: iommu@lists.linux-foundation.org
Cc: Heiko Stuebner <heiko@sntech.de>,
kvm@vger.kernel.org, David Airlie <airlied@linux.ie>,
Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
dri-devel@lists.freedesktop.org,
Bjorn Andersson <bjorn.andersson@linaro.org>,
linux-tegra@vger.kernel.org, Julien Grall <julien.grall@arm.com>,
Thierry Reding <thierry.reding@gmail.com>,
Will Deacon <will@kernel.org>,
Marek Szyprowski <m.szyprowski@samsung.com>,
Jean-Philippe Brucker <jean-philippe@linaro.org>,
linux-samsung-soc@vger.kernel.org, Marc Zyngier <maz@kernel.org>,
Joerg Roedel <joro@8bytes.org>,
Krzysztof Kozlowski <krzk@kernel.org>,
Jonathan Hunter <jonathanh@nvidia.com>,
linux-rockchip@lists.infradead.org,
Andy Gross <agross@kernel.org>,
linux-arm-kernel@lists.infradead.org, linux-s390@vger.kernel.org,
linux-arm-msm@vger.kernel.org, intel-gfx@lists.freedesktop.org,
Jani Nikula <jani.nikula@linux.intel.com>,
Eric Auger <eric.auger@redhat.com>,
Alex Williamson <alex.williamson@redhat.com>,
linux-mediatek@lists.infradead.org,
Rodrigo Vivi <rodrigo.vivi@intel.com>,
Matthias Brugger <matthias.bgg@gmail.com>,
Thomas Gleixner <tglx@linutronix.de>,
virtualization@lists.linux-foundation.org,
Gerald Schaefer <gerald.schaefer@de.ibm.com>,
David Woodhouse <dwmw2@infradead.org>,
Cornelia Huck <cohuck@redhat.com>,
linux-kernel@vger.kernel.org, Tom Murphy <murphyt7@tcd.ie>,
Rob Clark <robdclark@gmail.com>, Kukjin Kim <kgene@kernel.org>,
Daniel Vetter <daniel@ffwll.ch>,
Robin Murphy <robin.murphy@arm.com>,
Lu Baolu <baolu.lu@linux.intel.com>
Subject: [PATCH 1/8] iommu/vt-d: clean up 32bit si_domain assignment
Date: Sat, 21 Dec 2019 15:03:53 +0000 [thread overview]
Message-ID: <20191221150402.13868-2-murphyt7@tcd.ie> (raw)
In-Reply-To: <20191221150402.13868-1-murphyt7@tcd.ie>
In the intel iommu driver, devices which only support 32bit DMA can't be
direct mapped. The implementation of this is weird. Currently we assign
it a direct mapped domain and then remove the domain later and replace
it with a domain of type IOMMU_DOMAIN_IDENTITY. We should just assign it
a domain of type IOMMU_DOMAIN_IDENTITY from the beginning rather than
needlessly swapping domains.
Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
---
drivers/iommu/intel-iommu.c | 88 +++++++++++++------------------------
1 file changed, 31 insertions(+), 57 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0c8d81f56a30..c1ea66467918 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3462,46 +3462,9 @@ static struct dmar_domain *get_private_domain_for_dev(struct device *dev)
}
/* Check if the dev needs to go through non-identity map and unmap process.*/
-static bool iommu_need_mapping(struct device *dev)
+static bool iommu_no_mapping(struct device *dev)
{
- int ret;
-
- if (iommu_dummy(dev))
- return false;
-
- ret = identity_mapping(dev);
- if (ret) {
- u64 dma_mask = *dev->dma_mask;
-
- if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
- dma_mask = dev->coherent_dma_mask;
-
- if (dma_mask >= dma_direct_get_required_mask(dev))
- return false;
-
- /*
- * 32 bit DMA is removed from si_domain and fall back to
- * non-identity mapping.
- */
- dmar_remove_one_dev_info(dev);
- ret = iommu_request_dma_domain_for_dev(dev);
- if (ret) {
- struct iommu_domain *domain;
- struct dmar_domain *dmar_domain;
-
- domain = iommu_get_domain_for_dev(dev);
- if (domain) {
- dmar_domain = to_dmar_domain(domain);
- dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
- }
- dmar_remove_one_dev_info(dev);
- get_private_domain_for_dev(dev);
- }
-
- dev_info(dev, "32bit DMA uses non-identity mapping\n");
- }
-
- return true;
+ return iommu_dummy(dev) || identity_mapping(dev);
}
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
@@ -3568,20 +3531,22 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- if (iommu_need_mapping(dev))
- return __intel_map_single(dev, page_to_phys(page) + offset,
- size, dir, *dev->dma_mask);
- return dma_direct_map_page(dev, page, offset, size, dir, attrs);
+ if (iommu_no_mapping(dev))
+ return dma_direct_map_page(dev, page, offset, size, dir, attrs);
+
+ return __intel_map_single(dev, page_to_phys(page) + offset, size, dir,
+ *dev->dma_mask);
}
static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- if (iommu_need_mapping(dev))
- return __intel_map_single(dev, phys_addr, size, dir,
- *dev->dma_mask);
- return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+ if (iommu_no_mapping(dev))
+ return dma_direct_map_resource(dev, phys_addr, size, dir,
+ attrs);
+
+ return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
}
static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3632,16 +3597,16 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- if (iommu_need_mapping(dev))
- intel_unmap(dev, dev_addr, size);
- else
+ if (iommu_no_mapping(dev))
dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+ else
+ intel_unmap(dev, dev_addr, size);
}
static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- if (iommu_need_mapping(dev))
+ if (!iommu_no_mapping(dev))
intel_unmap(dev, dev_addr, size);
}
@@ -3652,7 +3617,7 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
struct page *page = NULL;
int order;
- if (!iommu_need_mapping(dev))
+ if (iommu_no_mapping(dev))
return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
size = PAGE_ALIGN(size);
@@ -3688,7 +3653,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
int order;
struct page *page = virt_to_page(vaddr);
- if (!iommu_need_mapping(dev))
+ if (iommu_no_mapping(dev))
return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
size = PAGE_ALIGN(size);
@@ -3708,7 +3673,7 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *sg;
int i;
- if (!iommu_need_mapping(dev))
+ if (iommu_no_mapping(dev))
return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
for_each_sg(sglist, sg, nelems, i) {
@@ -3734,7 +3699,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (!iommu_need_mapping(dev))
+ if (iommu_no_mapping(dev))
return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
domain = deferred_attach_domain(dev);
@@ -3782,7 +3747,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
static u64 intel_get_required_mask(struct device *dev)
{
- if (!iommu_need_mapping(dev))
+ if (iommu_no_mapping(dev))
return dma_direct_get_required_mask(dev);
return DMA_BIT_MASK(32);
}
@@ -5618,9 +5583,13 @@ static int intel_iommu_add_device(struct device *dev)
struct iommu_domain *domain;
struct intel_iommu *iommu;
struct iommu_group *group;
+ u64 dma_mask = *dev->dma_mask;
u8 bus, devfn;
int ret;
+ if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
+ dma_mask = dev->coherent_dma_mask;
+
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
@@ -5640,7 +5609,12 @@ static int intel_iommu_add_device(struct device *dev)
domain = iommu_get_domain_for_dev(dev);
dmar_domain = to_dmar_domain(domain);
if (domain->type == IOMMU_DOMAIN_DMA) {
- if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
+ /*
+ * We check dma_mask >= dma_get_required_mask(dev) because
+ * 32 bit DMA falls back to non-identity mapping.
+ */
+ if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY &&
+ dma_mask >= dma_get_required_mask(dev)) {
ret = iommu_request_dm_for_dev(dev);
if (ret) {
dmar_remove_one_dev_info(dev);
--
2.20.1
_______________________________________________
Linux-mediatek mailing list
Linux-mediatek@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-mediatek
next prev parent reply other threads:[~2019-12-21 15:04 UTC|newest]
Thread overview: 37+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-12-21 15:03 [PATCH 0/8] Convert the intel iommu driver to the dma-iommu api Tom Murphy
2019-12-21 15:03 ` Tom Murphy [this message]
2019-12-21 23:46 ` [PATCH 1/8] iommu/vt-d: clean up 32bit si_domain assignment Arvind Sankar
2019-12-23 3:00 ` Lu Baolu
2019-12-21 15:03 ` [PATCH 2/8] iommu/vt-d: Use default dma_direct_* mapping functions for direct mapped devices Tom Murphy
2019-12-21 15:03 ` [PATCH 3/8] iommu/vt-d: Remove IOVA handling code from non-dma_ops path Tom Murphy
2020-03-20 6:30 ` Tom Murphy
2020-03-20 7:06 ` Lu Baolu
2019-12-21 15:03 ` [PATCH 4/8] iommu: Handle freelists when using deferred flushing in iommu drivers Tom Murphy
2019-12-21 15:03 ` [PATCH 5/8] iommu: Add iommu_dma_free_cpu_cached_iovas function Tom Murphy
2019-12-21 15:03 ` [PATCH 6/8] iommu: allow the dma-iommu api to use bounce buffers Tom Murphy
2019-12-24 10:20 ` kbuild test robot
2019-12-21 15:03 ` [PATCH 7/8] iommu/vt-d: Convert intel iommu driver to the iommu ops Tom Murphy
2019-12-21 15:04 ` [PATCH 8/8] DO NOT MERGE: iommu: disable list appending in dma-iommu Tom Murphy
2019-12-23 10:37 ` [PATCH 0/8] Convert the intel iommu driver to the dma-iommu api Jani Nikula
2019-12-23 11:29 ` Robin Murphy
2019-12-23 11:41 ` Jani Nikula
2020-03-20 6:28 ` Tom Murphy
2020-05-29 0:00 ` Logan Gunthorpe
2020-05-29 12:45 ` Christoph Hellwig
2020-05-29 19:05 ` Logan Gunthorpe
2020-05-29 21:11 ` Marek Szyprowski
2020-05-29 21:21 ` Logan Gunthorpe
2020-08-24 0:04 ` Tom Murphy
2020-08-26 18:26 ` Alex Deucher
2020-08-27 21:36 ` Logan Gunthorpe
2020-08-27 23:34 ` Tom Murphy
2020-09-03 20:26 ` Tom Murphy
2020-09-08 15:28 ` [Intel-gfx] " Tvrtko Ursulin
2020-09-08 15:44 ` Logan Gunthorpe
2020-09-08 15:56 ` Tvrtko Ursulin
2020-09-08 22:43 ` Tom Murphy
2020-09-09 9:16 ` Tvrtko Ursulin
2020-09-09 12:55 ` Tvrtko Ursulin
2020-09-10 13:33 ` Tom Murphy
2020-09-10 13:34 ` Tom Murphy
2020-08-26 18:14 ` Robin Murphy
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20191221150402.13868-2-murphyt7@tcd.ie \
--to=murphyt7@tcd.ie \
--cc=agross@kernel.org \
--cc=airlied@linux.ie \
--cc=alex.williamson@redhat.com \
--cc=baolu.lu@linux.intel.com \
--cc=bjorn.andersson@linaro.org \
--cc=cohuck@redhat.com \
--cc=daniel@ffwll.ch \
--cc=dri-devel@lists.freedesktop.org \
--cc=dwmw2@infradead.org \
--cc=eric.auger@redhat.com \
--cc=gerald.schaefer@de.ibm.com \
--cc=heiko@sntech.de \
--cc=intel-gfx@lists.freedesktop.org \
--cc=iommu@lists.linux-foundation.org \
--cc=jani.nikula@linux.intel.com \
--cc=jean-philippe@linaro.org \
--cc=jonathanh@nvidia.com \
--cc=joonas.lahtinen@linux.intel.com \
--cc=joro@8bytes.org \
--cc=julien.grall@arm.com \
--cc=kgene@kernel.org \
--cc=krzk@kernel.org \
--cc=kvm@vger.kernel.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-arm-msm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mediatek@lists.infradead.org \
--cc=linux-rockchip@lists.infradead.org \
--cc=linux-s390@vger.kernel.org \
--cc=linux-samsung-soc@vger.kernel.org \
--cc=linux-tegra@vger.kernel.org \
--cc=m.szyprowski@samsung.com \
--cc=matthias.bgg@gmail.com \
--cc=maz@kernel.org \
--cc=robdclark@gmail.com \
--cc=robin.murphy@arm.com \
--cc=rodrigo.vivi@intel.com \
--cc=tglx@linutronix.de \
--cc=thierry.reding@gmail.com \
--cc=virtualization@lists.linux-foundation.org \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).