All of lore.kernel.org
 help / color / mirror / Atom feed
* dma-direct fixes and cleanups v3
@ 2021-11-11  6:50 Christoph Hellwig
  2021-11-11  6:50 ` [PATCH 01/11] dma-direct: factor out dma_set_{de,en}crypted helpers Christoph Hellwig
                   ` (11 more replies)
  0 siblings, 12 replies; 23+ messages in thread
From: Christoph Hellwig @ 2021-11-11  6:50 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Hi all,

Linus complained about the complex flow in dma_direct_alloc, so this
tries to simplify it a bit, and while I was at it I also made sure that
unencrypted pages never leak back into the page allocator.

Changes since v2:
 - don't call dma_set_decrypted on remapped memory
 - move the leak printk into dma_set_encrypted
 - add another local variable to clean up dma_direct_alloc
 - return NULL when there is no way to make the memory coherent

Changes since v1:
 - fix a missing return
 - add a new patch to fix a pre-existing missing unmap
 - various additional cleanups
 
Diffstat:
 direct.c |  234 +++++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 138 insertions(+), 96 deletions(-)
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 23+ messages in thread

* [PATCH 01/11] dma-direct: factor out dma_set_{de,en}crypted helpers
  2021-11-11  6:50 dma-direct fixes and cleanups v3 Christoph Hellwig
@ 2021-11-11  6:50 ` Christoph Hellwig
  2021-11-11  6:50 ` [PATCH 02/11] dma-direct: don't call dma_set_decrypted for remapped allocations Christoph Hellwig
                   ` (10 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Christoph Hellwig @ 2021-11-11  6:50 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Factor out helpers that make dealing with memory encryption a little less
cumbersome.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
---
 kernel/dma/direct.c | 56 ++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 31 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4c6c5e0635e34..d4d54af31a341 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
+static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
 				    size_t size)
 {
@@ -154,7 +168,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 {
 	struct page *page;
 	void *ret;
-	int err;
 
 	size = PAGE_ALIGN(size);
 	if (attrs & DMA_ATTR_NO_WARN)
@@ -216,12 +229,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
-		if (force_dma_unencrypted(dev)) {
-			err = set_memory_decrypted((unsigned long)ret,
-						   1 << get_order(size));
-			if (err)
-				goto out_free_pages;
-		}
+		if (dma_set_decrypted(dev, ret, size))
+			goto out_free_pages;
 		memset(ret, 0, size);
 		goto done;
 	}
@@ -238,13 +247,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
-		err = set_memory_decrypted((unsigned long)ret,
-					   1 << get_order(size));
-		if (err)
-			goto out_free_pages;
-	}
-
+	if (dma_set_decrypted(dev, ret, size))
+		goto out_free_pages;
 	memset(ret, 0, size);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
@@ -259,13 +263,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	return ret;
 
 out_encrypt_pages:
-	if (force_dma_unencrypted(dev)) {
-		err = set_memory_encrypted((unsigned long)page_address(page),
-					   1 << get_order(size));
-		/* If memory cannot be re-encrypted, it must be leaked */
-		if (err)
-			return NULL;
-	}
+	/* If memory cannot be re-encrypted, it must be leaked */
+	if (dma_set_encrypted(dev, page_address(page), size))
+		return NULL;
 out_free_pages:
 	__dma_direct_free_pages(dev, page, size);
 	return NULL;
@@ -304,8 +304,7 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -341,11 +340,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
-		if (set_memory_decrypted((unsigned long)ret,
-				1 << get_order(size)))
-			goto out_free_pages;
-	}
+	if (dma_set_decrypted(dev, ret, size))
+		goto out_free_pages;
 	memset(ret, 0, size);
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return page;
@@ -366,9 +362,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, vaddr, size))
 		return;
 
-	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
-
+	dma_set_encrypted(dev, vaddr, 1 << page_order);
 	__dma_direct_free_pages(dev, page, size);
 }
 
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 02/11] dma-direct: don't call dma_set_decrypted for remapped allocations
  2021-11-11  6:50 dma-direct fixes and cleanups v3 Christoph Hellwig
  2021-11-11  6:50 ` [PATCH 01/11] dma-direct: factor out dma_set_{de,en}crypted helpers Christoph Hellwig
@ 2021-11-11  6:50 ` Christoph Hellwig
  2021-11-11  6:50 ` [PATCH 03/11] dma-direct: always leak memory that can't be re-encrypted Christoph Hellwig
                   ` (9 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Christoph Hellwig @ 2021-11-11  6:50 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Remapped allocations handle the encrypted bit through the pgprot passed
to vmap, so there is no need to call dma_set_decrypted.  Note that this
case is currently entirely theoretical as no valid kernel configuration
supports remapped allocations and memory encryption currently.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index d4d54af31a341..996ba4edb2fa3 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -229,8 +229,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
-		if (dma_set_decrypted(dev, ret, size))
-			goto out_free_pages;
 		memset(ret, 0, size);
 		goto done;
 	}
@@ -304,12 +302,13 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
-
-	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
 		vunmap(cpu_addr);
-	else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
-		arch_dma_clear_uncached(cpu_addr, size);
+	} else {
+		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
+			arch_dma_clear_uncached(cpu_addr, size);
+		dma_set_encrypted(dev, cpu_addr, 1 << page_order);
+	}
 
 	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
 }
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 03/11] dma-direct: always leak memory that can't be re-encrypted
  2021-11-11  6:50 dma-direct fixes and cleanups v3 Christoph Hellwig
  2021-11-11  6:50 ` [PATCH 01/11] dma-direct: factor out dma_set_{de,en}crypted helpers Christoph Hellwig
  2021-11-11  6:50 ` [PATCH 02/11] dma-direct: don't call dma_set_decrypted for remapped allocations Christoph Hellwig
@ 2021-11-11  6:50 ` Christoph Hellwig
  2021-12-06 16:32   ` Robin Murphy
  2021-11-11  6:50 ` [PATCH 04/11] dma-direct: clean up the remapping checks in dma_direct_alloc Christoph Hellwig
                   ` (8 subsequent siblings)
  11 siblings, 1 reply; 23+ messages in thread
From: Christoph Hellwig @ 2021-11-11  6:50 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

We must never let unencrypted memory go back into the general page pool.
So if we fail to set it back to encrypted when freeing DMA memory, leak
the memory instead and warn the user.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 996ba4edb2fa3..d7a489be48470 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -84,9 +84,14 @@ static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
 
 static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
 {
+	int ret;
+
 	if (!force_dma_unencrypted(dev))
 		return 0;
-	return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+	ret = set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+	if (ret)
+		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
+	return ret;
 }
 
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
@@ -261,7 +266,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	return ret;
 
 out_encrypt_pages:
-	/* If memory cannot be re-encrypted, it must be leaked */
 	if (dma_set_encrypted(dev, page_address(page), size))
 		return NULL;
 out_free_pages:
@@ -307,7 +311,8 @@ void dma_direct_free(struct device *dev, size_t size,
 	} else {
 		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
 			arch_dma_clear_uncached(cpu_addr, size);
-		dma_set_encrypted(dev, cpu_addr, 1 << page_order);
+		if (dma_set_encrypted(dev, cpu_addr, 1 << page_order))
+			return;
 	}
 
 	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
@@ -361,7 +366,8 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, vaddr, size))
 		return;
 
-	dma_set_encrypted(dev, vaddr, 1 << page_order);
+	if (dma_set_encrypted(dev, vaddr, 1 << page_order))
+		return;
 	__dma_direct_free_pages(dev, page, size);
 }
 
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 04/11] dma-direct: clean up the remapping checks in dma_direct_alloc
  2021-11-11  6:50 dma-direct fixes and cleanups v3 Christoph Hellwig
                   ` (2 preceding siblings ...)
  2021-11-11  6:50 ` [PATCH 03/11] dma-direct: always leak memory that can't be re-encrypted Christoph Hellwig
@ 2021-11-11  6:50 ` Christoph Hellwig
  2021-12-06 16:33   ` Robin Murphy
  2021-11-11  6:50 ` [PATCH 05/11] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations Christoph Hellwig
                   ` (7 subsequent siblings)
  11 siblings, 1 reply; 23+ messages in thread
From: Christoph Hellwig @ 2021-11-11  6:50 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Add two local variables to track if we want to remap the returned
address using vmap or call dma_set_uncached and use that to simplify
the code flow.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 48 ++++++++++++++++++++++++---------------------
 1 file changed, 26 insertions(+), 22 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index d7a489be48470..3d1718dc077e9 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -171,6 +171,7 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
+	bool remap = false, set_uncached = false;
 	struct page *page;
 	void *ret;
 
@@ -222,9 +223,25 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	     !dev_is_dma_coherent(dev)) ||
-	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+	if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
+		remap = true;
+	} else if (PageHighMem(page)) {
+		/*
+		 * Depending on the cma= arguments and per-arch setup,
+		 * dma_alloc_contiguous could return highmem pages.
+		 * Without remapping there is no way to return them here, so
+		 * log an error and fail.
+		 */
+		if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
+			dev_info(dev, "Rejecting highmem page from CMA.\n");
+			goto out_free_pages;
+		}
+		remap = true;
+	} else if (!dev_is_dma_coherent(dev) &&
+		   IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
+		set_uncached = true;
+
+	if (remap) {
 		/* remove any dirty cache lines on the kernel alias */
 		arch_dma_prep_coherent(page, size);
 
@@ -234,34 +251,21 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
-		memset(ret, 0, size);
-		goto done;
-	}
-
-	if (PageHighMem(page)) {
-		/*
-		 * Depending on the cma= arguments and per-arch setup
-		 * dma_alloc_contiguous could return highmem pages.
-		 * Without remapping there is no way to return them here,
-		 * so log an error and fail.
-		 */
-		dev_info(dev, "Rejecting highmem page from CMA.\n");
-		goto out_free_pages;
+	} else {
+		ret = page_address(page);
+		if (dma_set_decrypted(dev, ret, size))
+			goto out_free_pages;
 	}
 
-	ret = page_address(page);
-	if (dma_set_decrypted(dev, ret, size))
-		goto out_free_pages;
 	memset(ret, 0, size);
 
-	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !dev_is_dma_coherent(dev)) {
+	if (set_uncached) {
 		arch_dma_prep_coherent(page, size);
 		ret = arch_dma_set_uncached(ret, size);
 		if (IS_ERR(ret))
 			goto out_encrypt_pages;
 	}
-done:
+
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return ret;
 
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 05/11] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations
  2021-11-11  6:50 dma-direct fixes and cleanups v3 Christoph Hellwig
                   ` (3 preceding siblings ...)
  2021-11-11  6:50 ` [PATCH 04/11] dma-direct: clean up the remapping checks in dma_direct_alloc Christoph Hellwig
@ 2021-11-11  6:50 ` Christoph Hellwig
  2021-11-11  6:50 ` [PATCH 06/11] dma-direct: refactor the !coherent checks in dma_direct_alloc Christoph Hellwig
                   ` (6 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Christoph Hellwig @ 2021-11-11  6:50 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Split the code for DMA_ATTR_NO_KERNEL_MAPPING allocations into a separate
helper to make dma_direct_alloc a little more readable.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: David Rientjes <rientjes@google.com>
---
 kernel/dma/direct.c | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 3d1718dc077e9..01104660ec439 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -168,6 +168,24 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 	return ret;
 }
 
+static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp)
+{
+	struct page *page;
+
+	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+	if (!page)
+		return NULL;
+
+	/* remove any dirty cache lines on the kernel alias */
+	if (!PageHighMem(page))
+		arch_dma_prep_coherent(page, size);
+
+	/* return the page pointer as the opaque cookie */
+	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	return page;
+}
+
 void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
@@ -180,17 +198,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		gfp |= __GFP_NOWARN;
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
-		page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
-		if (!page)
-			return NULL;
-		/* remove any dirty cache lines on the kernel alias */
-		if (!PageHighMem(page))
-			arch_dma_prep_coherent(page, size);
-		*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
-		/* return the page pointer as the opaque cookie */
-		return page;
-	}
+	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
+		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 06/11] dma-direct: refactor the !coherent checks in dma_direct_alloc
  2021-11-11  6:50 dma-direct fixes and cleanups v3 Christoph Hellwig
                   ` (4 preceding siblings ...)
  2021-11-11  6:50 ` [PATCH 05/11] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations Christoph Hellwig
@ 2021-11-11  6:50 ` Christoph Hellwig
  2021-12-06 16:33   ` Robin Murphy
  2021-11-11  6:50 ` [PATCH 07/11] dma-direct: fail allocations that can't be made coherent Christoph Hellwig
                   ` (5 subsequent siblings)
  11 siblings, 1 reply; 23+ messages in thread
From: Christoph Hellwig @ 2021-11-11  6:50 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Add a big central !dev_is_dma_coherent(dev) block to deal with as much
of the uncached allocation schemes as possible, and document the schemes
a bit better.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 66 ++++++++++++++++++++++++++++-----------------
 1 file changed, 41 insertions(+), 25 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 01104660ec439..f9658fe18498c 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -201,29 +201,49 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
 		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
 
-	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
-	    !dev_is_dma_coherent(dev) &&
-	    !is_swiotlb_for_alloc(dev))
-		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
+	if (!dev_is_dma_coherent(dev)) {
+		/*
+		 * Fallback to the arch handler if it exists.  This should
+		 * eventually go away.
+		 */
+		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
+		    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+		    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+		    !is_swiotlb_for_alloc(dev))
+			return arch_dma_alloc(dev, size, dma_handle, gfp,
+					      attrs);
 
-	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
-	    !dev_is_dma_coherent(dev))
-		return dma_alloc_from_global_coherent(dev, size, dma_handle);
+		/*
+		 * If there is a global pool, always allocate from it for
+		 * non-coherent devices.
+		 */
+		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
+			return dma_alloc_from_global_coherent(dev, size,
+					dma_handle);
+
+		/*
+		 * Otherwise remap if the architecture is asking for it.  But
+		 * given that remapping memory is a blocking operation we'll
+		 * instead have to dip into the atomic pools.
+		 */
+		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
+		if (remap) {
+			if (!gfpflags_allow_blocking(gfp) &&
+			    !is_swiotlb_for_alloc(dev))
+				return dma_direct_alloc_from_pool(dev, size,
+						dma_handle, gfp);
+		} else {
+			if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
+				set_uncached = true;
+		}
+	}
 
 	/*
-	 * Remapping or decrypting memory may block. If either is required and
-	 * we can't block, allocate the memory from the atomic pools.
-	 * If restricted DMA (i.e., is_swiotlb_for_alloc) is required, one must
-	 * set up another device coherent pool by shared-dma-pool and use
-	 * dma_alloc_from_dev_coherent instead.
+	 * Decrypting memory may block, so allocate the memory from the atomic
+	 * pools if we can't block.
 	 */
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-	    !gfpflags_allow_blocking(gfp) &&
-	    (force_dma_unencrypted(dev) ||
-	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	      !dev_is_dma_coherent(dev))) &&
+	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
 	    !is_swiotlb_for_alloc(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
@@ -231,10 +251,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
 	if (!page)
 		return NULL;
-
-	if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
-		remap = true;
-	} else if (PageHighMem(page)) {
+	if (PageHighMem(page)) {
 		/*
 		 * Depending on the cma= arguments and per-arch setup,
 		 * dma_alloc_contiguous could return highmem pages.
@@ -246,9 +263,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 			goto out_free_pages;
 		}
 		remap = true;
-	} else if (!dev_is_dma_coherent(dev) &&
-		   IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
-		set_uncached = true;
+		set_uncached = false;
+	}
 
 	if (remap) {
 		/* remove any dirty cache lines on the kernel alias */
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 07/11] dma-direct: fail allocations that can't be made coherent
  2021-11-11  6:50 dma-direct fixes and cleanups v3 Christoph Hellwig
                   ` (5 preceding siblings ...)
  2021-11-11  6:50 ` [PATCH 06/11] dma-direct: refactor the !coherent checks in dma_direct_alloc Christoph Hellwig
@ 2021-11-11  6:50 ` Christoph Hellwig
  2021-12-06 16:33   ` Robin Murphy
  2021-11-11  6:50 ` [PATCH 08/11] dma-direct: warn if there is no pool for force unencrypted allocations Christoph Hellwig
                   ` (4 subsequent siblings)
  11 siblings, 1 reply; 23+ messages in thread
From: Christoph Hellwig @ 2021-11-11  6:50 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

If the architecture can't remap or set an address uncached there is no way
to fulfill a request for a coherent allocation.  Return NULL in that case.
Note that this case currently does not happen, so this is a theoretical
fixup and/or a preparation for eventually supporting platforms that
can't support coherent allocations with the generic code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f9658fe18498c..a13017656ecae 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -233,8 +233,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 				return dma_direct_alloc_from_pool(dev, size,
 						dma_handle, gfp);
 		} else {
-			if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
-				set_uncached = true;
+			if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
+				return NULL;
+			set_uncached = true;
 		}
 	}
 
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 08/11] dma-direct: warn if there is no pool for force unencrypted allocations
  2021-11-11  6:50 dma-direct fixes and cleanups v3 Christoph Hellwig
                   ` (6 preceding siblings ...)
  2021-11-11  6:50 ` [PATCH 07/11] dma-direct: fail allocations that can't be made coherent Christoph Hellwig
@ 2021-11-11  6:50 ` Christoph Hellwig
  2021-11-11  6:50 ` [PATCH 09/11] dma-direct: drop two CONFIG_DMA_RESTRICTED_POOL conditionals Christoph Hellwig
                   ` (3 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Christoph Hellwig @ 2021-11-11  6:50 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Instead of blindly running into a blocking operation for a non-blocking gfp,
return NULL and spew an error.  Note that Kconfig prevents this for all
currently relevant platforms, and this is just a debug check.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
---
 kernel/dma/direct.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index a13017656ecae..84226a764471b 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -159,6 +159,9 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 	u64 phys_mask;
 	void *ret;
 
+	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
+		return NULL;
+
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_mask);
 	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
@@ -243,8 +246,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	 * Decrypting memory may block, so allocate the memory from the atomic
 	 * pools if we can't block.
 	 */
-	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
+	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
 	    !is_swiotlb_for_alloc(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
@@ -354,8 +356,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	struct page *page;
 	void *ret;
 
-	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
+	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
 	    !is_swiotlb_for_alloc(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 09/11] dma-direct: drop two CONFIG_DMA_RESTRICTED_POOL conditionals
  2021-11-11  6:50 dma-direct fixes and cleanups v3 Christoph Hellwig
                   ` (7 preceding siblings ...)
  2021-11-11  6:50 ` [PATCH 08/11] dma-direct: warn if there is no pool for force unencrypted allocations Christoph Hellwig
@ 2021-11-11  6:50 ` Christoph Hellwig
  2021-11-11  6:50 ` [PATCH 10/11] dma-direct: factor the swiotlb code out of __dma_direct_alloc_pages Christoph Hellwig
                   ` (2 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Christoph Hellwig @ 2021-11-11  6:50 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

swiotlb_alloc and swiotlb_free are properly stubbed out if
CONFIG_DMA_RESTRICTED_POOL is not set, so skip the extra checks.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
---
 kernel/dma/direct.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 84226a764471b..cf75bfb2f499e 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -97,8 +97,7 @@ static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
 				    size_t size)
 {
-	if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
-	    swiotlb_free(dev, page, size))
+	if (swiotlb_free(dev, page, size))
 		return;
 	dma_free_contiguous(dev, page, size);
 }
@@ -114,8 +113,7 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
-	if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
-	    is_swiotlb_for_alloc(dev)) {
+	if (is_swiotlb_for_alloc(dev)) {
 		page = swiotlb_alloc(dev, size);
 		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 			__dma_direct_free_pages(dev, page, size);
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 10/11] dma-direct: factor the swiotlb code out of __dma_direct_alloc_pages
  2021-11-11  6:50 dma-direct fixes and cleanups v3 Christoph Hellwig
                   ` (8 preceding siblings ...)
  2021-11-11  6:50 ` [PATCH 09/11] dma-direct: drop two CONFIG_DMA_RESTRICTED_POOL conditionals Christoph Hellwig
@ 2021-11-11  6:50 ` Christoph Hellwig
  2021-11-11  6:50 ` [PATCH 11/11] dma-direct: add a dma_direct_use_pool helper Christoph Hellwig
  2021-11-16 11:31 ` dma-direct fixes and cleanups v3 Robin Murphy
  11 siblings, 0 replies; 23+ messages in thread
From: Christoph Hellwig @ 2021-11-11  6:50 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Add a new helper to deal with the swiotlb case.  This keeps the code
nicely bundled and removes the unneeded call to
dma_direct_optimal_gfp_mask for the swiotlb case.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
---
 kernel/dma/direct.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index cf75bfb2f499e..924937c54e8ab 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -102,6 +102,18 @@ static void __dma_direct_free_pages(struct device *dev, struct page *page,
 	dma_free_contiguous(dev, page, size);
 }
 
+static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
+{
+	struct page *page = swiotlb_alloc(dev, size);
+
+	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+		swiotlb_free(dev, page, size);
+		return NULL;
+	}
+
+	return page;
+}
+
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp)
 {
@@ -111,17 +123,11 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	WARN_ON_ONCE(!PAGE_ALIGNED(size));
 
+	if (is_swiotlb_for_alloc(dev))
+		return dma_direct_alloc_swiotlb(dev, size);
+
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
-	if (is_swiotlb_for_alloc(dev)) {
-		page = swiotlb_alloc(dev, size);
-		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-			__dma_direct_free_pages(dev, page, size);
-			return NULL;
-		}
-		return page;
-	}
-
 	page = dma_alloc_contiguous(dev, size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, size);
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 11/11] dma-direct: add a dma_direct_use_pool helper
  2021-11-11  6:50 dma-direct fixes and cleanups v3 Christoph Hellwig
                   ` (9 preceding siblings ...)
  2021-11-11  6:50 ` [PATCH 10/11] dma-direct: factor the swiotlb code out of __dma_direct_alloc_pages Christoph Hellwig
@ 2021-11-11  6:50 ` Christoph Hellwig
       [not found]   ` <CGME20211208154459eucas1p24743399c20b5d1fbc3f519d68d9660a6@eucas1p2.samsung.com>
  2021-11-16 11:31 ` dma-direct fixes and cleanups v3 Robin Murphy
  11 siblings, 1 reply; 23+ messages in thread
From: Christoph Hellwig @ 2021-11-11  6:50 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Add a helper to check if a potentially blocking operation should
dip into the atomic pools.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
---
 kernel/dma/direct.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 924937c54e8ab..d0a317ed8f029 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -156,6 +156,15 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 	return page;
 }
 
+/*
+ * Check if a potentially blocking operations needs to dip into the atomic
+ * pools for the given device/gfp.
+ */
+static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
+{
+	return gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
+}
+
 static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp)
 {
@@ -235,8 +244,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		 */
 		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
 		if (remap) {
-			if (!gfpflags_allow_blocking(gfp) &&
-			    !is_swiotlb_for_alloc(dev))
+			if (dma_direct_use_pool(dev, gfp))
 				return dma_direct_alloc_from_pool(dev, size,
 						dma_handle, gfp);
 		} else {
@@ -250,8 +258,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	 * Decrypting memory may block, so allocate the memory from the atomic
 	 * pools if we can't block.
 	 */
-	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
-	    !is_swiotlb_for_alloc(dev))
+	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	/* we always manually zero the memory once we are done */
@@ -360,8 +367,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	struct page *page;
 	void *ret;
 
-	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
-	    !is_swiotlb_for_alloc(dev))
+	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	page = __dma_direct_alloc_pages(dev, size, gfp);
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* Re: dma-direct fixes and cleanups v3
  2021-11-11  6:50 dma-direct fixes and cleanups v3 Christoph Hellwig
                   ` (10 preceding siblings ...)
  2021-11-11  6:50 ` [PATCH 11/11] dma-direct: add a dma_direct_use_pool helper Christoph Hellwig
@ 2021-11-16 11:31 ` Robin Murphy
  2021-11-17  5:51   ` Christoph Hellwig
  11 siblings, 1 reply; 23+ messages in thread
From: Robin Murphy @ 2021-11-16 11:31 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-11-11 06:50, Christoph Hellwig wrote:
> Hi all,
> 
> Linus complained about the complex flow in dma_direct_alloc, so this
> tries to simplify it a bit, and while I was at it I also made sure that
> unencrypted pages never leak back into the page allocator.

Before I forget, I've had a quick skim of the remaining patches and 
nothing more stands out. Let me know if you'd like me to find time to 
check everything over in detail again for a proper review, but otherwise 
I reckon we may as well get this baking in -next sooner rather than later.

Cheers,
Robin.

> Changes since v2:
>   - don't call dma_set_decrypted on remapped memory
>   - move the leak printk into dma_set_encrypted
>   - add another local variable to clean up dma_direct_alloc
>   - return NULL when there is no way to make the memory coherent
> 
> Changes since v1:
>   - fix a missing return
>   - add a new patch to fix a pre-existing missing unmap
>   - various additional cleanups
>   
> Diffstat:
>   direct.c |  234 +++++++++++++++++++++++++++++++++++++--------------------------
>   1 file changed, 138 insertions(+), 96 deletions(-)
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: dma-direct fixes and cleanups v3
  2021-11-16 11:31 ` dma-direct fixes and cleanups v3 Robin Murphy
@ 2021-11-17  5:51   ` Christoph Hellwig
  0 siblings, 0 replies; 23+ messages in thread
From: Christoph Hellwig @ 2021-11-17  5:51 UTC (permalink / raw)
  To: Robin Murphy; +Cc: iommu, Christoph Hellwig, David Rientjes

On Tue, Nov 16, 2021 at 11:31:49AM +0000, Robin Murphy wrote:
> On 2021-11-11 06:50, Christoph Hellwig wrote:
>> Hi all,
>>
>> Linus complained about the complex flow in dma_direct_alloc, so this
>> tries to simplify it a bit, and while I was at it I also made sure that
>> unencrypted pages never leak back into the page allocator.
>
> Before I forget, I've had a quick skim of the remaining patches and nothing 
> more stands out. Let me know if you'd like me to find time to check 
> everything over in detail again for a proper review, but otherwise I reckon 
> we may as well get this baking in -next sooner rather than later.

I'd rather wait for a proper review, I don't think we are in a rush
with any of these patches.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 03/11] dma-direct: always leak memory that can't be re-encrypted
  2021-11-11  6:50 ` [PATCH 03/11] dma-direct: always leak memory that can't be re-encrypted Christoph Hellwig
@ 2021-12-06 16:32   ` Robin Murphy
  2021-12-07 11:48     ` Christoph Hellwig
  0 siblings, 1 reply; 23+ messages in thread
From: Robin Murphy @ 2021-12-06 16:32 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-11-11 06:50, Christoph Hellwig wrote:
> We must never unencryped memory go back into the general page pool.
> So if we fail to set it back to encrypted when freeing DMA memory, leak
> the memory insted and warn the user.

Nit: typos of "unencrypted" and "instead". Plus presumably the first 
sentence was meant to have a "let" or similar in there too.

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   kernel/dma/direct.c | 14 ++++++++++----
>   1 file changed, 10 insertions(+), 4 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 996ba4edb2fa3..d7a489be48470 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -84,9 +84,14 @@ static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
>   
>   static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
>   {
> +	int ret;
> +
>   	if (!force_dma_unencrypted(dev))
>   		return 0;
> -	return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
> +	ret = set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
> +	if (ret)
> +		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
> +	return ret;
>   }
>   
>   static void __dma_direct_free_pages(struct device *dev, struct page *page,
> @@ -261,7 +266,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   	return ret;
>   
>   out_encrypt_pages:
> -	/* If memory cannot be re-encrypted, it must be leaked */
>   	if (dma_set_encrypted(dev, page_address(page), size))
>   		return NULL;
>   out_free_pages:
> @@ -307,7 +311,8 @@ void dma_direct_free(struct device *dev, size_t size,
>   	} else {
>   		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
>   			arch_dma_clear_uncached(cpu_addr, size);
> -		dma_set_encrypted(dev, cpu_addr, 1 << page_order);
> +		if (dma_set_encrypted(dev, cpu_addr, 1 << page_order))
> +			return;
>   	}
>   
>   	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
> @@ -361,7 +366,8 @@ void dma_direct_free_pages(struct device *dev, size_t size,
>   	    dma_free_from_pool(dev, vaddr, size))
>   		return;
>   
> -	dma_set_encrypted(dev, vaddr, 1 << page_order);
> +	if (dma_set_encrypted(dev, vaddr, 1 << page_order))
> +		return;
>   	__dma_direct_free_pages(dev, page, size);
>   }
>   
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 04/11] dma-direct: clean up the remapping checks in dma_direct_alloc
  2021-11-11  6:50 ` [PATCH 04/11] dma-direct: clean up the remapping checks in dma_direct_alloc Christoph Hellwig
@ 2021-12-06 16:33   ` Robin Murphy
  2021-12-07 11:49     ` Christoph Hellwig
  0 siblings, 1 reply; 23+ messages in thread
From: Robin Murphy @ 2021-12-06 16:33 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-11-11 06:50, Christoph Hellwig wrote:
> Add two local variables to track if we want to remap the returned
> address using vmap or call dma_set_uncached and use that to simplify
> the code flow.

I still wonder about the asymmetry between the remap and set_uncached 
cases WRT the memset(), which stands out even more the further we clean 
things up, but that's another matter.

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   kernel/dma/direct.c | 48 ++++++++++++++++++++++++---------------------
>   1 file changed, 26 insertions(+), 22 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index d7a489be48470..3d1718dc077e9 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -171,6 +171,7 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
>   void *dma_direct_alloc(struct device *dev, size_t size,
>   		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
>   {
> +	bool remap = false, set_uncached = false;
>   	struct page *page;
>   	void *ret;
>   
> @@ -222,9 +223,25 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   	if (!page)
>   		return NULL;
>   
> -	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
> -	     !dev_is_dma_coherent(dev)) ||
> -	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
> +	if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
> +		remap = true;
> +	} else if (PageHighMem(page)) {
> +		/*
> +		 * Depending on the cma= arguments and per-arch setup,
> +		 * dma_alloc_contiguous could return highmem pages.
> +		 * Without remapping there is no way to return them here, so
> +		 * log an error and fail.
> +		 */
> +		if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
> +			dev_info(dev, "Rejecting highmem page from CMA.\n");
> +			goto out_free_pages;
> +		}
> +		remap = true;
> +	} else if (!dev_is_dma_coherent(dev) &&
> +		   IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
> +		set_uncached = true;
> +
> +	if (remap) {
>   		/* remove any dirty cache lines on the kernel alias */
>   		arch_dma_prep_coherent(page, size);
>   
> @@ -234,34 +251,21 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   				__builtin_return_address(0));
>   		if (!ret)
>   			goto out_free_pages;
> -		memset(ret, 0, size);
> -		goto done;
> -	}
> -
> -	if (PageHighMem(page)) {
> -		/*
> -		 * Depending on the cma= arguments and per-arch setup
> -		 * dma_alloc_contiguous could return highmem pages.
> -		 * Without remapping there is no way to return them here,
> -		 * so log an error and fail.
> -		 */
> -		dev_info(dev, "Rejecting highmem page from CMA.\n");
> -		goto out_free_pages;
> +	} else {
> +		ret = page_address(page);
> +		if (dma_set_decrypted(dev, ret, size))
> +			goto out_free_pages;
>   	}
>   
> -	ret = page_address(page);
> -	if (dma_set_decrypted(dev, ret, size))
> -		goto out_free_pages;
>   	memset(ret, 0, size);
>   
> -	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
> -	    !dev_is_dma_coherent(dev)) {
> +	if (set_uncached) {
>   		arch_dma_prep_coherent(page, size);
>   		ret = arch_dma_set_uncached(ret, size);
>   		if (IS_ERR(ret))
>   			goto out_encrypt_pages;
>   	}
> -done:
> +
>   	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
>   	return ret;
>   
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 06/11] dma-direct: refactor the !coherent checks in dma_direct_alloc
  2021-11-11  6:50 ` [PATCH 06/11] dma-direct: refactor the !coherent checks in dma_direct_alloc Christoph Hellwig
@ 2021-12-06 16:33   ` Robin Murphy
  0 siblings, 0 replies; 23+ messages in thread
From: Robin Murphy @ 2021-12-06 16:33 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-11-11 06:50, Christoph Hellwig wrote:
> Add a big central !dev_is_dma_coherent(dev) block to deal with as much
> of the uncached allocation schemes as possible and document the schemes a bit
> better.

With a clear mind and a side-by-side diff viewer, indeed the end result 
is much more readable than the patch itself :)

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   kernel/dma/direct.c | 66 ++++++++++++++++++++++++++++-----------------
>   1 file changed, 41 insertions(+), 25 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 01104660ec439..f9658fe18498c 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -201,29 +201,49 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
>   		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
>   
> -	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
> -	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
> -	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
> -	    !dev_is_dma_coherent(dev) &&
> -	    !is_swiotlb_for_alloc(dev))
> -		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
> +	if (!dev_is_dma_coherent(dev)) {
> +		/*
> +		 * Fallback to the arch handler if it exists.  This should
> +		 * eventually go away.
> +		 */
> +		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
> +		    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
> +		    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
> +		    !is_swiotlb_for_alloc(dev))
> +			return arch_dma_alloc(dev, size, dma_handle, gfp,
> +					      attrs);
>   
> -	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
> -	    !dev_is_dma_coherent(dev))
> -		return dma_alloc_from_global_coherent(dev, size, dma_handle);
> +		/*
> +		 * If there is a global pool, always allocate from it for
> +		 * non-coherent devices.
> +		 */
> +		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
> +			return dma_alloc_from_global_coherent(dev, size,
> +					dma_handle);
> +
> +		/*
> +		 * Otherwise remap if the architecture is asking for it.  But
> +		 * given that remapping memory is a blocking operation we'll
> +		 * instead have to dip into the atomic pools.
> +		 */
> +		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
> +		if (remap) {
> +			if (!gfpflags_allow_blocking(gfp) &&
> +			    !is_swiotlb_for_alloc(dev))
> +				return dma_direct_alloc_from_pool(dev, size,
> +						dma_handle, gfp);
> +		} else {
> +			if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
> +				set_uncached = true;
> +		}
> +	}
>   
>   	/*
> -	 * Remapping or decrypting memory may block. If either is required and
> -	 * we can't block, allocate the memory from the atomic pools.
> -	 * If restricted DMA (i.e., is_swiotlb_for_alloc) is required, one must
> -	 * set up another device coherent pool by shared-dma-pool and use
> -	 * dma_alloc_from_dev_coherent instead.
> +	 * Decrypting memory may block, so allocate the memory from the atomic
> +	 * pools if we can't block.
>   	 */
>   	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
> -	    !gfpflags_allow_blocking(gfp) &&
> -	    (force_dma_unencrypted(dev) ||
> -	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
> -	      !dev_is_dma_coherent(dev))) &&
> +	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
>   	    !is_swiotlb_for_alloc(dev))
>   		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
>   
> @@ -231,10 +251,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
>   	if (!page)
>   		return NULL;
> -
> -	if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
> -		remap = true;
> -	} else if (PageHighMem(page)) {
> +	if (PageHighMem(page)) {
>   		/*
>   		 * Depending on the cma= arguments and per-arch setup,
>   		 * dma_alloc_contiguous could return highmem pages.
> @@ -246,9 +263,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   			goto out_free_pages;
>   		}
>   		remap = true;
> -	} else if (!dev_is_dma_coherent(dev) &&
> -		   IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
> -		set_uncached = true;
> +		set_uncached = false;
> +	}
>   
>   	if (remap) {
>   		/* remove any dirty cache lines on the kernel alias */
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 07/11] dma-direct: fail allocations that can't be made coherent
  2021-11-11  6:50 ` [PATCH 07/11] dma-direct: fail allocations that can't be made coherent Christoph Hellwig
@ 2021-12-06 16:33   ` Robin Murphy
  0 siblings, 0 replies; 23+ messages in thread
From: Robin Murphy @ 2021-12-06 16:33 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-11-11 06:50, Christoph Hellwig wrote:
> If the architecture can't remap or set an address uncached there is no way
> to fulfill a request for a coherent allocation.  Return NULL in that case.
> Note that this case currently does not happen, so this is a theoretical
> fixup and/or a preparation for eventually supporting platforms that
> can't support coherent allocations with the generic code.

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   kernel/dma/direct.c | 5 +++--
>   1 file changed, 3 insertions(+), 2 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index f9658fe18498c..a13017656ecae 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -233,8 +233,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   				return dma_direct_alloc_from_pool(dev, size,
>   						dma_handle, gfp);
>   		} else {
> -			if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
> -				set_uncached = true;
> +			if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
> +				return NULL;
> +			set_uncached = true;
>   		}
>   	}
>   
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 03/11] dma-direct: always leak memory that can't be re-encrypted
  2021-12-06 16:32   ` Robin Murphy
@ 2021-12-07 11:48     ` Christoph Hellwig
  0 siblings, 0 replies; 23+ messages in thread
From: Christoph Hellwig @ 2021-12-07 11:48 UTC (permalink / raw)
  To: Robin Murphy; +Cc: iommu, Christoph Hellwig, David Rientjes

On Mon, Dec 06, 2021 at 04:32:58PM +0000, Robin Murphy wrote:
> On 2021-11-11 06:50, Christoph Hellwig wrote:
>> We must never unencryped memory go back into the general page pool.
>> So if we fail to set it back to encrypted when freeing DMA memory, leak
>> the memory insted and warn the user.
>
> Nit: typos of "unencrypted" and "instead". Plus presumably the first 
> sentence was meant to have a "let" or similar in there too.

Fixed.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 04/11] dma-direct: clean up the remapping checks in dma_direct_alloc
  2021-12-06 16:33   ` Robin Murphy
@ 2021-12-07 11:49     ` Christoph Hellwig
  2021-12-07 12:43       ` Robin Murphy
  0 siblings, 1 reply; 23+ messages in thread
From: Christoph Hellwig @ 2021-12-07 11:49 UTC (permalink / raw)
  To: Robin Murphy; +Cc: iommu, Christoph Hellwig, David Rientjes

On Mon, Dec 06, 2021 at 04:33:10PM +0000, Robin Murphy wrote:
> On 2021-11-11 06:50, Christoph Hellwig wrote:
>> Add two local variables to track if we want to remap the returned
>> address using vmap or call dma_set_uncached and use that to simplify
>> the code flow.
>
> I still wonder about the asymmetry between the remap and set_uncached cases 
> WRT the memset(), which stands out even more the further we clean things 
> up, but that's another matter.

The memset for the remap case obviously needs to be done after
remapping. OTOH for the set_uncached case the memset is much faster
when done on the cached mapping, which must be done before calling
arch_dma_set_uncached.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 04/11] dma-direct: clean up the remapping checks in dma_direct_alloc
  2021-12-07 11:49     ` Christoph Hellwig
@ 2021-12-07 12:43       ` Robin Murphy
  0 siblings, 0 replies; 23+ messages in thread
From: Robin Murphy @ 2021-12-07 12:43 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: iommu, David Rientjes

On 2021-12-07 11:49, Christoph Hellwig wrote:
> On Mon, Dec 06, 2021 at 04:33:10PM +0000, Robin Murphy wrote:
>> On 2021-11-11 06:50, Christoph Hellwig wrote:
>>> Add two local variables to track if we want to remap the returned
>>> address using vmap or call dma_set_uncached and use that to simplify
>>> the code flow.
>>
>> I still wonder about the asymmetry between the remap and set_uncached cases
>> WRT the memset(), which stands out even more the further we clean things
>> up, but that's another matter.
> 
> The memset for the remap case obviously needs to be done after
> remapping.

For highmem and certain encryption setups, yes, but plain non-cacheable 
remaps like on arm64 could technically be done either way round.

> OTOH for the set_uncached case the memset is much faster
> when done on the cached mapping, which must be done before calling
> arch_dma_set_uncached.

Indeed the memset() itself will be faster, but a fair amount of that 
"saving" will just be punting work to arch_dma_prep_coherent() - 
ultimately the same number of bytes of zeros has to be pushed out into 
the memory system either way. It should certainly be somewhat more 
efficient in that any previously-dirty cache lines won't get written out 
twice, and background cache eviction allows a bit more overlap of 
operations, but I'm wondering how significant a difference it really is 
in practice.

Really, though, I'm thinking ahead to the remap-in-place idea using 
set_memory_*() or similar, and where that might fit into all this, as it 
seems to be about equal parts remap, set_uncached, and neither.

Robin.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 11/11] dma-direct: add a dma_direct_use_pool helper
       [not found]   ` <CGME20211208154459eucas1p24743399c20b5d1fbc3f519d68d9660a6@eucas1p2.samsung.com>
@ 2021-12-08 15:44     ` Marek Szyprowski
  2021-12-08 15:48       ` Christoph Hellwig
  0 siblings, 1 reply; 23+ messages in thread
From: Marek Szyprowski @ 2021-12-08 15:44 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: Robin Murphy, David Rientjes

Hi Christoph,

On 11.11.2021 07:50, Christoph Hellwig wrote:
> Add a helper to check if a potentially blocking operation should
> dip into the atomic pools.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> Reviewed-by: Robin Murphy <robin.murphy@arm.com>
> ---
>   kernel/dma/direct.c | 18 ++++++++++++------
>   1 file changed, 12 insertions(+), 6 deletions(-)
>
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 924937c54e8ab..d0a317ed8f029 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -156,6 +156,15 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
>   	return page;
>   }
>   
> +/*
> + * Check if a potentially blocking operations needs to dip into the atomic
> + * pools for the given device/gfp.
> + */
> +static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
> +{
> +	return gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
This should be:

return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);

otherwise all dma allocations fail badly on ARM64, what happens on today's linux-next (plenty of "Failed to get suitable pool for XYZ" messages).

Do you want me to send a fixup patch or would you simply fix it in your tree?

> +}
> +
>   static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
>   		dma_addr_t *dma_handle, gfp_t gfp)
>   {
> @@ -235,8 +244,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   		 */
>   		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
>   		if (remap) {
> -			if (!gfpflags_allow_blocking(gfp) &&
> -			    !is_swiotlb_for_alloc(dev))
> +			if (dma_direct_use_pool(dev, gfp))
>   				return dma_direct_alloc_from_pool(dev, size,
>   						dma_handle, gfp);
>   		} else {
> @@ -250,8 +258,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   	 * Decrypting memory may block, so allocate the memory from the atomic
>   	 * pools if we can't block.
>   	 */
> -	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
> -	    !is_swiotlb_for_alloc(dev))
> +	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
>   		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
>   
>   	/* we always manually zero the memory once we are done */
> @@ -360,8 +367,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
>   	struct page *page;
>   	void *ret;
>   
> -	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
> -	    !is_swiotlb_for_alloc(dev))
> +	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
>   		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
>   
>   	page = __dma_direct_alloc_pages(dev, size, gfp);

Best regards
-- 
Marek Szyprowski, PhD
Samsung R&D Institute Poland

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 11/11] dma-direct: add a dma_direct_use_pool helper
  2021-12-08 15:44     ` Marek Szyprowski
@ 2021-12-08 15:48       ` Christoph Hellwig
  0 siblings, 0 replies; 23+ messages in thread
From: Christoph Hellwig @ 2021-12-08 15:48 UTC (permalink / raw)
  To: Marek Szyprowski; +Cc: iommu, Robin Murphy, Christoph Hellwig, David Rientjes

I've force pushed out the fixed version, thanks!
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 23+ messages in thread

end of thread, other threads:[~2021-12-08 15:49 UTC | newest]

Thread overview: 23+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-11-11  6:50 dma-direct fixes and cleanups v3 Christoph Hellwig
2021-11-11  6:50 ` [PATCH 01/11] dma-direct: factor out dma_set_{de,en}crypted helpers Christoph Hellwig
2021-11-11  6:50 ` [PATCH 02/11] dma-direct: don't call dma_set_decrypted for remapped allocations Christoph Hellwig
2021-11-11  6:50 ` [PATCH 03/11] dma-direct: always leak memory that can't be re-encrypted Christoph Hellwig
2021-12-06 16:32   ` Robin Murphy
2021-12-07 11:48     ` Christoph Hellwig
2021-11-11  6:50 ` [PATCH 04/11] dma-direct: clean up the remapping checks in dma_direct_alloc Christoph Hellwig
2021-12-06 16:33   ` Robin Murphy
2021-12-07 11:49     ` Christoph Hellwig
2021-12-07 12:43       ` Robin Murphy
2021-11-11  6:50 ` [PATCH 05/11] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations Christoph Hellwig
2021-11-11  6:50 ` [PATCH 06/11] dma-direct: refactor the !coherent checks in dma_direct_alloc Christoph Hellwig
2021-12-06 16:33   ` Robin Murphy
2021-11-11  6:50 ` [PATCH 07/11] dma-direct: fail allocations that can't be made coherent Christoph Hellwig
2021-12-06 16:33   ` Robin Murphy
2021-11-11  6:50 ` [PATCH 08/11] dma-direct: warn if there is no pool for force unencrypted allocations Christoph Hellwig
2021-11-11  6:50 ` [PATCH 09/11] dma-direct: drop two CONFIG_DMA_RESTRICTED_POOL conditionals Christoph Hellwig
2021-11-11  6:50 ` [PATCH 10/11] dma-direct: factor the swiotlb code out of __dma_direct_alloc_pages Christoph Hellwig
2021-11-11  6:50 ` [PATCH 11/11] dma-direct: add a dma_direct_use_pool helper Christoph Hellwig
     [not found]   ` <CGME20211208154459eucas1p24743399c20b5d1fbc3f519d68d9660a6@eucas1p2.samsung.com>
2021-12-08 15:44     ` Marek Szyprowski
2021-12-08 15:48       ` Christoph Hellwig
2021-11-16 11:31 ` dma-direct fixes and cleanups v3 Robin Murphy
2021-11-17  5:51   ` Christoph Hellwig

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.