From: Christoph Hellwig <hch@lst.de>
To: Robin Murphy <robin.murphy@arm.com>
Cc: Joerg Roedel <joro@8bytes.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will.deacon@arm.com>,
	Tom Lendacky <thomas.lendacky@amd.com>,
	iommu@lists.linux-foundation.org,
	linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH 12/26] iommu/dma: Refactor the page array remapping allocator
Date: Mon, 22 Apr 2019 19:59:28 +0200	[thread overview]
Message-ID: <20190422175942.18788-13-hch@lst.de> (raw)
In-Reply-To: <20190422175942.18788-1-hch@lst.de>

Move the call to dma_common_pages_remap into __iommu_dma_alloc and
rename it to iommu_dma_alloc_remap.  This creates a self-contained
helper for remapped pages allocation and mapping.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/iommu/dma-iommu.c | 54 +++++++++++++++++++--------------------
 1 file changed, 26 insertions(+), 28 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 8e2d9733113e..b8e46e89a60a 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -535,9 +535,9 @@ static struct page **__iommu_dma_get_pages(void *cpu_addr)
 }
 
 /**
- * iommu_dma_free - Free a buffer allocated by __iommu_dma_alloc()
+ * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc_remap()
  * @dev: Device which owns this buffer
- * @pages: Array of buffer pages as returned by __iommu_dma_alloc()
+ * @pages: Array of buffer pages as returned by iommu_dma_alloc_remap()
  * @size: Size of buffer in bytes
  * @handle: DMA address of buffer
  *
@@ -553,33 +553,35 @@ static void __iommu_dma_free(struct device *dev, struct page **pages,
 }
 
 /**
- * __iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
+ * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
  * @dev: Device to allocate memory for. Must be a real device
  *	 attached to an iommu_dma_domain
  * @size: Size of buffer in bytes
+ * @dma_handle: Out argument for allocated DMA handle
  * @gfp: Allocation flags
  * @attrs: DMA attributes for this allocation
- * @prot: IOMMU mapping flags
- * @handle: Out argument for allocated DMA handle
  *
  * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
  * but an IOMMU which supports smaller pages might not map the whole thing.
  *
- * Return: Array of struct page pointers describing the buffer,
- *	   or NULL on failure.
+ * Return: Mapped virtual address, or NULL on failure.
  */
-static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
-		gfp_t gfp, unsigned long attrs, int prot, dma_addr_t *handle)
+static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
+	bool coherent = dev_is_dma_coherent(dev);
+	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 	struct page **pages;
 	struct sg_table sgt;
 	dma_addr_t iova;
-	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
+	void *vaddr;
 
-	*handle = DMA_MAPPING_ERROR;
+	*dma_handle = DMA_MAPPING_ERROR;
 
 	min_size = alloc_sizes & -alloc_sizes;
 	if (min_size < PAGE_SIZE) {
@@ -605,7 +607,7 @@ static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
 	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
 		goto out_free_iova;
 
-	if (!(prot & IOMMU_CACHE)) {
+	if (!(ioprot & IOMMU_CACHE)) {
 		struct scatterlist *sg;
 		int i;
 
@@ -613,14 +615,21 @@ static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
 			arch_dma_prep_coherent(sg_page(sg), sg->length);
 	}
 
-	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
+	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
 			< size)
 		goto out_free_sg;
 
-	*handle = iova;
+	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+			__builtin_return_address(0));
+	if (!vaddr)
+		goto out_unmap;
+
+	*dma_handle = iova;
 	sg_free_table(&sgt);
-	return pages;
+	return vaddr;
 
+out_unmap:
+	__iommu_dma_unmap(dev, iova, size);
 out_free_sg:
 	sg_free_table(&sgt);
 out_free_iova:
@@ -989,18 +998,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 						    size >> PAGE_SHIFT);
 		}
 	} else {
-		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
-		struct page **pages;
-
-		pages = __iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
-					handle);
-		if (!pages)
-			return NULL;
-
-		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
-					      __builtin_return_address(0));
-		if (!addr)
-			__iommu_dma_free(dev, pages, iosize, handle);
+		addr = iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);
 	}
 	return addr;
 }
@@ -1014,7 +1012,7 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	/*
 	 * @cpu_addr will be one of 4 things depending on how it was allocated:
 	 * - A remapped array of pages for contiguous allocations.
-	 * - A remapped array of pages from __iommu_dma_alloc(), for all
+	 * - A remapped array of pages from iommu_dma_alloc_remap(), for all
 	 *   non-atomic allocations.
 	 * - A non-cacheable alias from the atomic pool, for atomic
 	 *   allocations by non-coherent devices.
-- 
2.20.1
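
A minimal sketch of the resulting flow, for readers skimming the archive. This
is hand-written pseudocode distilled from the diff above rather than the actual
kernel sources; the IOVA allocation, scatterlist setup and goto-based error
unwinding of the real function are collapsed into comments. Before this patch,
iommu_dma_alloc() had to call __iommu_dma_alloc() for the page array, call
dma_common_pages_remap() itself, and free the pages by hand if the remap
failed; after it, one self-contained helper does all of that.

/* Pseudocode sketch only; see the diff above for the real code. */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page **pages;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	/* 1. allocate the page array and an IOVA range (elided) */
	/* 2. iommu_map_sg() the pages at @iova (elided) */

	/* 3. new here: remap into a contiguous kernel virtual address */
	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP,
			arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
			__builtin_return_address(0));
	if (!vaddr)
		return NULL;	/* the real code unmaps the IOVA first */

	*dma_handle = iova;
	return vaddr;
}

/* ... so the non-contiguous path in iommu_dma_alloc() collapses to: */
	addr = iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);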


Thread overview: 155+ messages

2019-04-22 17:59 implement generic dma_map_ops for IOMMUs v3 Christoph Hellwig
2019-04-22 17:59 ` [PATCH 01/26] arm64/iommu: handle non-remapped addresses in ->mmap and ->get_sgtable Christoph Hellwig
2019-04-22 17:59 ` [PATCH 02/26] arm64/iommu: improve mmap bounds checking Christoph Hellwig
2019-04-29 12:35   ` Robin Murphy
2019-04-29 19:01     ` Christoph Hellwig
2019-04-30 11:38       ` Robin Murphy
2019-04-22 17:59 ` [PATCH 03/26] dma-mapping: add a Kconfig symbol to indicated arch_dma_prep_coherent presence Christoph Hellwig
2019-04-22 17:59 ` [PATCH 04/26] iommu/dma: Cleanup dma-iommu.h Christoph Hellwig
2019-04-22 17:59 ` [PATCH 05/26] iommu/dma: Remove the flush_page callback Christoph Hellwig
2019-04-22 17:59 ` [PATCH 06/26] iommu/dma: Use for_each_sg in iommu_dma_alloc Christoph Hellwig
2019-04-22 17:59 ` [PATCH 07/26] iommu/dma: move the arm64 wrappers to common code Christoph Hellwig
2019-06-05  0:47   ` Hillf Danton
2019-04-29 12:56   ` Robin Murphy
2019-06-03 19:47     ` Jon Hunter
2019-06-04  6:05       ` Christoph Hellwig
2019-06-04 11:35         ` Jon Hunter
2019-04-22 17:59 ` [PATCH 08/26] iommu/dma: Move __iommu_dma_map Christoph Hellwig
2019-04-22 17:59 ` [PATCH 09/26] iommu/dma: Move domain lookup into __iommu_dma_{map,unmap} Christoph Hellwig
2019-04-22 17:59 ` [PATCH 10/26] iommu/dma: Squash __iommu_dma_{map,unmap}_page helpers Christoph Hellwig
2019-04-22 17:59 ` [PATCH 11/26] iommu/dma: Factor out remapped pages lookup Christoph Hellwig
2019-04-29 13:05   ` Robin Murphy
2019-04-29 19:10     ` Christoph Hellwig
2019-04-22 17:59 ` [PATCH 12/26] iommu/dma: Refactor the page array remapping allocator Christoph Hellwig [this message]
2019-04-29 13:10   ` Robin Murphy
2019-04-22 17:59 ` [PATCH 13/26] iommu/dma: Remove __iommu_dma_free Christoph Hellwig
2019-04-29 13:18   ` Robin Murphy
2019-04-22 17:59 ` [PATCH 14/26] iommu/dma: Refactor iommu_dma_free Christoph Hellwig
2019-04-29 13:59   ` Robin Murphy
2019-04-29 19:03     ` Christoph Hellwig
2019-04-29 19:16       ` Christoph Hellwig
2019-04-22 17:59 ` [PATCH 15/26] iommu/dma: Refactor iommu_dma_alloc Christoph Hellwig
2019-04-22 17:59 ` [PATCH 16/26] iommu/dma: Don't remap CMA unnecessarily Christoph Hellwig
2019-04-22 17:59 ` [PATCH 17/26] iommu/dma: Merge the CMA and alloc_pages allocation paths Christoph Hellwig
2019-04-22 17:59 ` [PATCH 18/26] iommu/dma: Split iommu_dma_free Christoph Hellwig
2019-04-22 17:59 ` [PATCH 19/26] iommu/dma: Cleanup variable naming in iommu_dma_alloc Christoph Hellwig
2019-04-29 14:11   ` Robin Murphy
2019-04-22 17:59 ` [PATCH 20/26] iommu/dma: Refactor iommu_dma_alloc, part 2 Christoph Hellwig
2019-04-29 14:45   ` Robin Murphy
2019-04-22 17:59 ` [PATCH 21/26] iommu/dma: Refactor iommu_dma_get_sgtable Christoph Hellwig
2019-04-29 14:08   ` Robin Murphy
2019-04-22 17:59 ` [PATCH 22/26] iommu/dma: Refactor iommu_dma_mmap Christoph Hellwig
2019-04-29 14:04   ` Robin Murphy
2019-04-22 17:59 ` [PATCH 23/26] iommu/dma: Don't depend on CONFIG_DMA_DIRECT_REMAP Christoph Hellwig
2019-04-29 14:46   ` Robin Murphy
2019-04-22 17:59 ` [PATCH 24/26] iommu/dma: Switch copyright boilerplace to SPDX Christoph Hellwig
2019-04-22 17:59 ` [PATCH 25/26] arm64: switch copyright boilerplace to SPDX in dma-mapping.c Christoph Hellwig
2019-04-22 17:59 ` [PATCH 26/26] arm64: trim includes in dma-mapping.c Christoph Hellwig
2019-04-29 15:00   ` Robin Murphy
2019-04-29 15:03 ` implement generic dma_map_ops for IOMMUs v3 Robin Murphy
