From: Marek Szyprowski <m.szyprowski@samsung.com>
To: Linux IOMMU <iommu@lists.linux-foundation.org>,
	linux-media@vger.kernel.org, linux-samsung-soc@vger.kernel.org
Cc: Marek Szyprowski <m.szyprowski@samsung.com>,
	Robin Murphy <robin.murphy@arm.com>,
	Joerg Roedel <joro@8bytes.org>, Christoph Hellwig <hch@lst.de>,
	Sylwester Nawrocki <snawrocki@kernel.org>,
	Krzysztof Kozlowski <krzk@kernel.org>,
	Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>,
	Tomasz Figa <tfiga@chromium.org>,
	linux-kernel@vger.kernel.org
Subject: [PATCH 5/8] iommu: dma-iommu: add support for DMA_ATTR_LOW_ADDRESS
Date: Fri, 25 Sep 2020 16:12:15 +0200
Message-ID: <20200925141218.13550-6-m.szyprowski@samsung.com>
In-Reply-To: <20200925141218.13550-1-m.szyprowski@samsung.com>

Implement support for the DMA_ATTR_LOW_ADDRESS DMA attribute. When it is
set, allocate the IOVA with alloc_iova_first_fit() instead of
alloc_iova_fast(), so that the new IOVA is taken from the beginning of the
DMA address space. The attribute is translated into the new
DMA_ALLOC_IOVA_FIRST_FIT allocation flag by the dma_attrs_to_alloc_flags()
helper and passed down to iommu_dma_alloc_iova() by all the mapping paths.
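
For reference, a minimal driver-side usage sketch follows (illustrative
only: the device pointer, buffer size and error handling are placeholders,
and DMA_ATTR_LOW_ADDRESS itself is introduced in patch 1/8 of this series):

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	void *cpu_addr;
	dma_addr_t dma_addr;

	/* Ask for an IOVA allocated from the start of the DMA address space. */
	cpu_addr = dma_alloc_attrs(dev, SZ_64K, &dma_addr, GFP_KERNEL,
				   DMA_ATTR_LOW_ADDRESS);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... use the buffer ... */

	dma_free_attrs(dev, SZ_64K, cpu_addr, dma_addr, DMA_ATTR_LOW_ADDRESS);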

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
---
 drivers/iommu/dma-iommu.c | 50 +++++++++++++++++++++++++++++----------
 1 file changed, 38 insertions(+), 12 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 0ea87023306f..ab39659c727a 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -401,6 +401,18 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
 }
 
 #define DMA_ALLOC_IOVA_COHERENT		BIT(0)
+#define DMA_ALLOC_IOVA_FIRST_FIT	BIT(1)
+
+static unsigned int dma_attrs_to_alloc_flags(unsigned long attrs, bool coherent)
+{
+	unsigned int flags = 0;
+
+	if (coherent)
+		flags |= DMA_ALLOC_IOVA_COHERENT;
+	if (attrs & DMA_ATTR_LOW_ADDRESS)
+		flags |= DMA_ALLOC_IOVA_FIRST_FIT;
+	return flags;
+}
 
 static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 		struct device *dev, size_t size, unsigned int flags)
@@ -433,13 +445,23 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
 
 	/* Try to get PCI devices a SAC address */
-	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
-		iova = alloc_iova_fast(iovad, iova_len,
-				       DMA_BIT_MASK(32) >> shift, false);
+	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev)) {
+		if (unlikely(flags & DMA_ALLOC_IOVA_FIRST_FIT))
+			iova = alloc_iova_first_fit(iovad, iova_len,
+						    DMA_BIT_MASK(32) >> shift);
+		else
+			iova = alloc_iova_fast(iovad, iova_len,
+					      DMA_BIT_MASK(32) >> shift, false);
+	}
 
-	if (iova == IOVA_BAD_ADDR)
-		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
-				       true);
+	if (iova == IOVA_BAD_ADDR) {
+		if (unlikely(flags & DMA_ALLOC_IOVA_FIRST_FIT))
+			iova = alloc_iova_first_fit(iovad, iova_len,
+						    dma_limit >> shift);
+		else
+			iova = alloc_iova_fast(iovad, iova_len,
+					       dma_limit >> shift, true);
+	}
 
 	if (iova != IOVA_BAD_ADDR)
 		return (dma_addr_t)iova << shift;
@@ -593,6 +615,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	struct iova_domain *iovad = &cookie->iovad;
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+	unsigned int flags = dma_attrs_to_alloc_flags(attrs, true);
 	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 	struct page **pages;
@@ -622,7 +645,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 		return NULL;
 
 	size = iova_align(iovad, size);
-	iova = iommu_dma_alloc_iova(domain, dev, size, DMA_ALLOC_IOVA_COHERENT);
+	iova = iommu_dma_alloc_iova(domain, dev, size, flags);
 	if (iova == DMA_MAPPING_ERROR)
 		goto out_free_pages;
 
@@ -732,12 +755,13 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
 {
+	unsigned int flags = dma_attrs_to_alloc_flags(attrs, false);
 	phys_addr_t phys = page_to_phys(page) + offset;
 	bool coherent = dev_is_dma_coherent(dev);
 	int prot = dma_info_to_prot(dir, coherent, attrs);
 	dma_addr_t dma_handle;
 
-	dma_handle = __iommu_dma_map(dev, phys, size, prot, 0);
+	dma_handle = __iommu_dma_map(dev, phys, size, prot, flags);
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    dma_handle != DMA_MAPPING_ERROR)
 		arch_sync_dma_for_device(phys, size, dir);
@@ -842,6 +866,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	struct iova_domain *iovad = &cookie->iovad;
 	struct scatterlist *s, *prev = NULL;
 	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
+	unsigned int flags = dma_attrs_to_alloc_flags(attrs, false);
 	dma_addr_t iova;
 	size_t iova_len = 0;
 	unsigned long mask = dma_get_seg_boundary(dev);
@@ -892,7 +917,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		prev = s;
 	}
 
-	iova = iommu_dma_alloc_iova(domain, dev, iova_len, 0);
+	iova = iommu_dma_alloc_iova(domain, dev, iova_len, flags);
 	if (iova == DMA_MAPPING_ERROR)
 		goto out_restore_sg;
 
@@ -940,7 +965,8 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	return __iommu_dma_map(dev, phys, size,
-			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO, 0);
+		       dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+		       dma_attrs_to_alloc_flags(attrs, false));
 }
 
 static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -1027,6 +1053,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 static void *iommu_dma_alloc(struct device *dev, size_t size,
 		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
+	unsigned int flags = dma_attrs_to_alloc_flags(attrs, true);
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 	struct page *page = NULL;
@@ -1047,8 +1074,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	if (!cpu_addr)
 		return NULL;
 
-	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
-				  DMA_ALLOC_IOVA_COHERENT);
+	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot, flags);
 	if (*handle == DMA_MAPPING_ERROR) {
 		__iommu_dma_free(dev, size, cpu_addr);
 		return NULL;
-- 
2.17.1


Thread overview: 10+ messages

2020-09-25 14:12 ` [PATCH 0/8] IOMMU-DMA - support old allocation algorithm used on ARM Marek Szyprowski
2020-09-25 14:12   ` [PATCH 1/8] dma-mapping: add DMA_ATTR_LOW_ADDRESS attribute Marek Szyprowski
2020-09-25 16:20     ` Christoph Hellwig
2020-09-25 14:12   ` [PATCH 2/8] iommu: iova: properly handle 0 as a valid IOVA address Marek Szyprowski
2020-09-25 14:12   ` [PATCH 3/8] iommu: iova: add support for 'first-fit' algorithm Marek Szyprowski
2020-09-25 14:12   ` [PATCH 4/8] iommu: dma-iommu: refactor iommu_dma_alloc_iova() Marek Szyprowski
2020-09-25 14:12   ` [PATCH 5/8] iommu: dma-iommu: add support for DMA_ATTR_LOW_ADDRESS Marek Szyprowski [this message]
2020-09-25 14:12   ` [PATCH 6/8] media: platform: exynos4-is: remove all references to physical addresses Marek Szyprowski
2020-09-25 14:12   ` [PATCH 7/8] media: platform: exynos4-is: use DMA_ATTR_LOW_ADDRESS Marek Szyprowski
2020-09-25 14:12   ` [PATCH 8/8] media: platform: s5p-mfc: use DMA_ATTR_LOW_ADDRESS Marek Szyprowski