All of lore.kernel.org
 help / color / mirror / Atom feed
* dma-direct fixes and cleanups v2
@ 2021-10-21  9:06 Christoph Hellwig
  2021-10-21  9:06 ` [PATCH 01/10] dma-direct: factor out dma_set_{de,en}crypted helpers Christoph Hellwig
                   ` (10 more replies)
  0 siblings, 11 replies; 29+ messages in thread
From: Christoph Hellwig @ 2021-10-21  9:06 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Hi all,

Linus complained about the complex flow in dma_direct_alloc, so this
tries to simplify it a bit, and while I was at it I also made sure that
unencrypted pages never leak back into the page allocator.

Changes since v1:
 - fix a missing return
 - add a new patch to fix a pre-existing missing unmap
 - various additional cleanups
 
Diffstat:
 direct.c |  234 +++++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 138 insertions(+), 96 deletions(-)
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* [PATCH 01/10] dma-direct: factor out dma_set_{de,en}crypted helpers
  2021-10-21  9:06 dma-direct fixes and cleanups v2 Christoph Hellwig
@ 2021-10-21  9:06 ` Christoph Hellwig
  2021-11-04 12:35   ` Robin Murphy
  2021-10-21  9:06 ` [PATCH 02/10] dma-direct: unmapped remapped pages when dma_set_decrypted Christoph Hellwig
                   ` (9 subsequent siblings)
  10 siblings, 1 reply; 29+ messages in thread
From: Christoph Hellwig @ 2021-10-21  9:06 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Factor out helpers that make dealing with memory encryption a little less
cumbersome.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 56 ++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 31 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4c6c5e0635e34..d4d54af31a341 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
+static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
 				    size_t size)
 {
@@ -154,7 +168,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 {
 	struct page *page;
 	void *ret;
-	int err;
 
 	size = PAGE_ALIGN(size);
 	if (attrs & DMA_ATTR_NO_WARN)
@@ -216,12 +229,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
-		if (force_dma_unencrypted(dev)) {
-			err = set_memory_decrypted((unsigned long)ret,
-						   1 << get_order(size));
-			if (err)
-				goto out_free_pages;
-		}
+		if (dma_set_decrypted(dev, ret, size))
+			goto out_free_pages;
 		memset(ret, 0, size);
 		goto done;
 	}
@@ -238,13 +247,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
-		err = set_memory_decrypted((unsigned long)ret,
-					   1 << get_order(size));
-		if (err)
-			goto out_free_pages;
-	}
-
+	if (dma_set_decrypted(dev, ret, size))
+		goto out_free_pages;
 	memset(ret, 0, size);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
@@ -259,13 +263,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	return ret;
 
 out_encrypt_pages:
-	if (force_dma_unencrypted(dev)) {
-		err = set_memory_encrypted((unsigned long)page_address(page),
-					   1 << get_order(size));
-		/* If memory cannot be re-encrypted, it must be leaked */
-		if (err)
-			return NULL;
-	}
+	/* If memory cannot be re-encrypted, it must be leaked */
+	if (dma_set_encrypted(dev, page_address(page), size))
+		return NULL;
 out_free_pages:
 	__dma_direct_free_pages(dev, page, size);
 	return NULL;
@@ -304,8 +304,7 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -341,11 +340,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
-		if (set_memory_decrypted((unsigned long)ret,
-				1 << get_order(size)))
-			goto out_free_pages;
-	}
+	if (dma_set_decrypted(dev, ret, size))
+		goto out_free_pages;
 	memset(ret, 0, size);
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return page;
@@ -366,9 +362,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, vaddr, size))
 		return;
 
-	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
-
+	dma_set_encrypted(dev, vaddr, 1 << page_order);
 	__dma_direct_free_pages(dev, page, size);
 }
 
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [PATCH 02/10] dma-direct: unmapped remapped pages when dma_set_decrypted
  2021-10-21  9:06 dma-direct fixes and cleanups v2 Christoph Hellwig
  2021-10-21  9:06 ` [PATCH 01/10] dma-direct: factor out dma_set_{de,en}crypted helpers Christoph Hellwig
@ 2021-10-21  9:06 ` Christoph Hellwig
  2021-11-04 12:35   ` Robin Murphy
  2021-10-21  9:06 ` [PATCH 03/10] dma-direct: leak memory that can't be re-encrypted Christoph Hellwig
                   ` (8 subsequent siblings)
  10 siblings, 1 reply; 29+ messages in thread
From: Christoph Hellwig @ 2021-10-21  9:06 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

When dma_set_decrypted fails for the remapping case in dma_direct_alloc
we also need to unmap the pages before freeing them.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index d4d54af31a341..2fef8dd401fe9 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -230,7 +230,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		if (!ret)
 			goto out_free_pages;
 		if (dma_set_decrypted(dev, ret, size))
-			goto out_free_pages;
+			goto out_unmap_pages;
 		memset(ret, 0, size);
 		goto done;
 	}
@@ -266,6 +266,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	/* If memory cannot be re-encrypted, it must be leaked */
 	if (dma_set_encrypted(dev, page_address(page), size))
 		return NULL;
+out_unmap_pages:
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(ret))
+		vunmap(ret);
 out_free_pages:
 	__dma_direct_free_pages(dev, page, size);
 	return NULL;
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [PATCH 03/10] dma-direct: leak memory that can't be re-encrypted
  2021-10-21  9:06 dma-direct fixes and cleanups v2 Christoph Hellwig
  2021-10-21  9:06 ` [PATCH 01/10] dma-direct: factor out dma_set_{de,en}crypted helpers Christoph Hellwig
  2021-10-21  9:06 ` [PATCH 02/10] dma-direct: unmapped remapped pages when dma_set_decrypted Christoph Hellwig
@ 2021-10-21  9:06 ` Christoph Hellwig
  2021-11-04 12:35   ` Robin Murphy
  2021-10-21  9:06 ` [PATCH 04/10] dma-direct: clean up the remapping checks in dma_direct_alloc Christoph Hellwig
                   ` (7 subsequent siblings)
  10 siblings, 1 reply; 29+ messages in thread
From: Christoph Hellwig @ 2021-10-21  9:06 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

We must never let unencrypted memory go back into the general page pool.
So if we fail to set it back to encrypted when freeing DMA memory, leak
the memory instead and warn the user.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 2fef8dd401fe9..60cb75aa6778e 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -263,9 +263,11 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	return ret;
 
 out_encrypt_pages:
-	/* If memory cannot be re-encrypted, it must be leaked */
-	if (dma_set_encrypted(dev, page_address(page), size))
+	if (dma_set_encrypted(dev, page_address(page), size)) {
+		pr_warn_ratelimited(
+			"leaking DMA memory that can't be re-encrypted\n");
 		return NULL;
+	}
 out_unmap_pages:
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(ret))
 		vunmap(ret);
@@ -307,7 +309,11 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
+	if (dma_set_encrypted(dev, cpu_addr, 1 << page_order)) {
+		pr_warn_ratelimited(
+			"leaking DMA memory that can't be re-encrypted\n");
+		return;
+	}
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -365,7 +371,11 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, vaddr, size))
 		return;
 
-	dma_set_encrypted(dev, vaddr, 1 << page_order);
+	if (dma_set_encrypted(dev, vaddr, 1 << page_order)) {
+		pr_warn_ratelimited(
+			"leaking DMA memory that can't be re-encrypted\n");
+		return;
+	}
 	__dma_direct_free_pages(dev, page, size);
 }
 
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [PATCH 04/10] dma-direct: clean up the remapping checks in dma_direct_alloc
  2021-10-21  9:06 dma-direct fixes and cleanups v2 Christoph Hellwig
                   ` (2 preceding siblings ...)
  2021-10-21  9:06 ` [PATCH 03/10] dma-direct: leak memory that can't be re-encrypted Christoph Hellwig
@ 2021-10-21  9:06 ` Christoph Hellwig
  2021-11-04 12:35   ` Robin Murphy
  2021-10-21  9:06 ` [PATCH 05/10] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations Christoph Hellwig
                   ` (6 subsequent siblings)
  10 siblings, 1 reply; 29+ messages in thread
From: Christoph Hellwig @ 2021-10-21  9:06 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Add a local variable to track if we want to remap the returned address
using vmap and use that to simplify the code flow.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 47 +++++++++++++++++++++++----------------------
 1 file changed, 24 insertions(+), 23 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 60cb75aa6778e..a6b6fe72af4d1 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -166,6 +166,7 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
+	bool remap = false;
 	struct page *page;
 	void *ret;
 
@@ -217,9 +218,23 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	     !dev_is_dma_coherent(dev)) ||
-	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+	if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
+		remap = true;
+	} else if (PageHighMem(page)) {
+		/*
+		 * Depending on the cma= arguments and per-arch setup,
+		 * dma_alloc_contiguous could return highmem pages.
+		 * Without remapping there is no way to return them here, so
+		 * log an error and fail.
+		 */
+		if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
+			dev_info(dev, "Rejecting highmem page from CMA.\n");
+			goto out_free_pages;
+		}
+		remap = true;
+	}
+
+	if (remap) {
 		/* remove any dirty cache lines on the kernel alias */
 		arch_dma_prep_coherent(page, size);
 
@@ -229,36 +244,22 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
-		if (dma_set_decrypted(dev, ret, size))
-			goto out_unmap_pages;
-		memset(ret, 0, size);
-		goto done;
-	}
-
-	if (PageHighMem(page)) {
-		/*
-		 * Depending on the cma= arguments and per-arch setup
-		 * dma_alloc_contiguous could return highmem pages.
-		 * Without remapping there is no way to return them here,
-		 * so log an error and fail.
-		 */
-		dev_info(dev, "Rejecting highmem page from CMA.\n");
-		goto out_free_pages;
+	} else {
+		ret = page_address(page);
 	}
 
-	ret = page_address(page);
 	if (dma_set_decrypted(dev, ret, size))
-		goto out_free_pages;
+		goto out_unmap_pages;
 	memset(ret, 0, size);
 
-	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !dev_is_dma_coherent(dev)) {
+	if (!dev_is_dma_coherent(dev) && !remap &&
+	    IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED)) {
 		arch_dma_prep_coherent(page, size);
 		ret = arch_dma_set_uncached(ret, size);
 		if (IS_ERR(ret))
 			goto out_encrypt_pages;
 	}
-done:
+
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return ret;
 
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [PATCH 05/10] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations
  2021-10-21  9:06 dma-direct fixes and cleanups v2 Christoph Hellwig
                   ` (3 preceding siblings ...)
  2021-10-21  9:06 ` [PATCH 04/10] dma-direct: clean up the remapping checks in dma_direct_alloc Christoph Hellwig
@ 2021-10-21  9:06 ` Christoph Hellwig
  2021-11-04 12:36   ` Robin Murphy
  2021-10-21  9:06 ` [PATCH 06/10] dma-direct: refactor the !coherent checks in dma_direct_alloc Christoph Hellwig
                   ` (5 subsequent siblings)
  10 siblings, 1 reply; 29+ messages in thread
From: Christoph Hellwig @ 2021-10-21  9:06 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Split the code for DMA_ATTR_NO_KERNEL_MAPPING allocations into a separate
helper to make dma_direct_alloc a little more readable.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: David Rientjes <rientjes@google.com>
---
 kernel/dma/direct.c | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index a6b6fe72af4d1..4ffdb524942a1 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -163,6 +163,24 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 	return ret;
 }
 
+static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp)
+{
+	struct page *page;
+
+	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+	if (!page)
+		return NULL;
+
+	/* remove any dirty cache lines on the kernel alias */
+	if (!PageHighMem(page))
+		arch_dma_prep_coherent(page, size);
+
+	/* return the page pointer as the opaque cookie */
+	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	return page;
+}
+
 void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
@@ -175,17 +193,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		gfp |= __GFP_NOWARN;
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
-		page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
-		if (!page)
-			return NULL;
-		/* remove any dirty cache lines on the kernel alias */
-		if (!PageHighMem(page))
-			arch_dma_prep_coherent(page, size);
-		*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
-		/* return the page pointer as the opaque cookie */
-		return page;
-	}
+	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
+		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [PATCH 06/10] dma-direct: refactor the !coherent checks in dma_direct_alloc
  2021-10-21  9:06 dma-direct fixes and cleanups v2 Christoph Hellwig
                   ` (4 preceding siblings ...)
  2021-10-21  9:06 ` [PATCH 05/10] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations Christoph Hellwig
@ 2021-10-21  9:06 ` Christoph Hellwig
  2021-11-04 12:36   ` Robin Murphy
  2021-10-21  9:06 ` [PATCH 07/10] dma-direct: warn if there is no pool for force unencrypted allocations Christoph Hellwig
                   ` (4 subsequent siblings)
  10 siblings, 1 reply; 29+ messages in thread
From: Christoph Hellwig @ 2021-10-21  9:06 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Add a big central !dev_is_dma_coherent(dev) block to deal with as much
of the uncached allocation schemes as possible, and document the schemes
a bit better.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 58 ++++++++++++++++++++++++++++-----------------
 1 file changed, 36 insertions(+), 22 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4ffdb524942a1..d66f37f34ba71 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -196,29 +196,46 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
 		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
 
-	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
-	    !dev_is_dma_coherent(dev) &&
-	    !is_swiotlb_for_alloc(dev))
-		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
+	if (!dev_is_dma_coherent(dev)) {
+		/*
+		 * Fallback to the arch handler if it exists.  This should
+		 * eventually go away.
+		 */
+		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
+		    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+		    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+		    !is_swiotlb_for_alloc(dev))
+			return arch_dma_alloc(dev, size, dma_handle, gfp,
+					      attrs);
 
-	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
-	    !dev_is_dma_coherent(dev))
-		return dma_alloc_from_global_coherent(dev, size, dma_handle);
+		/*
+		 * If there is a global pool, always allocate from it for
+		 * non-coherent devices.
+		 */
+		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
+			return dma_alloc_from_global_coherent(dev, size,
+					dma_handle);
+
+		/*
+		 * Otherwise remap if the architecture is asking for it.  But
+		 * given that remapping memory is a blocking operation we'll
+		 * instead have to dip into the atomic pools.
+		 */
+		if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
+			if (!gfpflags_allow_blocking(gfp) &&
+			    !is_swiotlb_for_alloc(dev))
+				return dma_direct_alloc_from_pool(dev, size,
+						dma_handle, gfp);
+			remap = true;
+		}
+	}
 
 	/*
-	 * Remapping or decrypting memory may block. If either is required and
-	 * we can't block, allocate the memory from the atomic pools.
-	 * If restricted DMA (i.e., is_swiotlb_for_alloc) is required, one must
-	 * set up another device coherent pool by shared-dma-pool and use
-	 * dma_alloc_from_dev_coherent instead.
+	 * Decrypting memory may block, so allocate the memory from the atomic
+	 * pools if we can't block.
 	 */
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-	    !gfpflags_allow_blocking(gfp) &&
-	    (force_dma_unencrypted(dev) ||
-	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	      !dev_is_dma_coherent(dev))) &&
+	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
 	    !is_swiotlb_for_alloc(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
@@ -226,10 +243,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
 	if (!page)
 		return NULL;
-
-	if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
-		remap = true;
-	} else if (PageHighMem(page)) {
+	if (PageHighMem(page)) {
 		/*
 		 * Depending on the cma= arguments and per-arch setup,
 		 * dma_alloc_contiguous could return highmem pages.
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [PATCH 07/10] dma-direct: warn if there is no pool for force unencrypted allocations
  2021-10-21  9:06 dma-direct fixes and cleanups v2 Christoph Hellwig
                   ` (5 preceding siblings ...)
  2021-10-21  9:06 ` [PATCH 06/10] dma-direct: refactor the !coherent checks in dma_direct_alloc Christoph Hellwig
@ 2021-10-21  9:06 ` Christoph Hellwig
  2021-11-04 12:36   ` Robin Murphy
  2021-10-21  9:06 ` [PATCH 08/10] dma-direct: drop two CONFIG_DMA_RESTRICTED_POOL conditionals Christoph Hellwig
                   ` (3 subsequent siblings)
  10 siblings, 1 reply; 29+ messages in thread
From: Christoph Hellwig @ 2021-10-21  9:06 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Instead of blindly running into a blocking operation for a non-blocking gfp,
return NULL and spew an error.  Note that Kconfig prevents this for all
currently relevant platforms, and this is just a debug check.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index d66f37f34ba71..680fe10410645 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -154,6 +154,9 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 	u64 phys_mask;
 	void *ret;
 
+	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
+		return NULL;
+
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_mask);
 	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
@@ -234,8 +237,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	 * Decrypting memory may block, so allocate the memory from the atomic
 	 * pools if we can't block.
 	 */
-	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
+	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
 	    !is_swiotlb_for_alloc(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
@@ -353,8 +355,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	struct page *page;
 	void *ret;
 
-	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
+	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
 	    !is_swiotlb_for_alloc(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [PATCH 08/10] dma-direct: drop two CONFIG_DMA_RESTRICTED_POOL conditionals
  2021-10-21  9:06 dma-direct fixes and cleanups v2 Christoph Hellwig
                   ` (6 preceding siblings ...)
  2021-10-21  9:06 ` [PATCH 07/10] dma-direct: warn if there is no pool for force unencrypted allocations Christoph Hellwig
@ 2021-10-21  9:06 ` Christoph Hellwig
  2021-11-04 12:37   ` Robin Murphy
  2021-10-21  9:06 ` [PATCH 09/10] dma-direct: factor the swiotlb code out of __dma_direct_alloc_pages Christoph Hellwig
                   ` (2 subsequent siblings)
  10 siblings, 1 reply; 29+ messages in thread
From: Christoph Hellwig @ 2021-10-21  9:06 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

swiotlb_alloc and swiotlb_free are properly stubbed out if
CONFIG_DMA_RESTRICTED_POOL is not set, so skip the extra checks.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 680fe10410645..f4ac2e1cdf469 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -92,8 +92,7 @@ static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
 				    size_t size)
 {
-	if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
-	    swiotlb_free(dev, page, size))
+	if (swiotlb_free(dev, page, size))
 		return;
 	dma_free_contiguous(dev, page, size);
 }
@@ -109,8 +108,7 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
-	if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
-	    is_swiotlb_for_alloc(dev)) {
+	if (is_swiotlb_for_alloc(dev)) {
 		page = swiotlb_alloc(dev, size);
 		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 			__dma_direct_free_pages(dev, page, size);
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [PATCH 09/10] dma-direct: factor the swiotlb code out of __dma_direct_alloc_pages
  2021-10-21  9:06 dma-direct fixes and cleanups v2 Christoph Hellwig
                   ` (7 preceding siblings ...)
  2021-10-21  9:06 ` [PATCH 08/10] dma-direct: drop two CONFIG_DMA_RESTRICTED_POOL conditionals Christoph Hellwig
@ 2021-10-21  9:06 ` Christoph Hellwig
  2021-11-04 12:37   ` Robin Murphy
  2021-10-21  9:06 ` [PATCH 10/10] dma-direct: add a dma_direct_use_pool helper Christoph Hellwig
  2021-11-04  8:54 ` dma-direct fixes and cleanups v2 Christoph Hellwig
  10 siblings, 1 reply; 29+ messages in thread
From: Christoph Hellwig @ 2021-10-21  9:06 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Add a new helper to deal with the swiotlb case.  This keeps the code
nicely bundled and removes the unnecessary call to
dma_direct_optimal_gfp_mask for the swiotlb case.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f4ac2e1cdf469..f2ec40f5733fc 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -97,6 +97,18 @@ static void __dma_direct_free_pages(struct device *dev, struct page *page,
 	dma_free_contiguous(dev, page, size);
 }
 
+static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
+{
+	struct page *page = swiotlb_alloc(dev, size);
+
+	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+		swiotlb_free(dev, page, size);
+		return NULL;
+	}
+
+	return page;
+}
+
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp)
 {
@@ -106,17 +118,11 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	WARN_ON_ONCE(!PAGE_ALIGNED(size));
 
+	if (is_swiotlb_for_alloc(dev))
+		return dma_direct_alloc_swiotlb(dev, size);
+
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
-	if (is_swiotlb_for_alloc(dev)) {
-		page = swiotlb_alloc(dev, size);
-		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-			__dma_direct_free_pages(dev, page, size);
-			return NULL;
-		}
-		return page;
-	}
-
 	page = dma_alloc_contiguous(dev, size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, size);
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [PATCH 10/10] dma-direct: add a dma_direct_use_pool helper
  2021-10-21  9:06 dma-direct fixes and cleanups v2 Christoph Hellwig
                   ` (8 preceding siblings ...)
  2021-10-21  9:06 ` [PATCH 09/10] dma-direct: factor the swiotlb code out of __dma_direct_alloc_pages Christoph Hellwig
@ 2021-10-21  9:06 ` Christoph Hellwig
  2021-11-04 12:37   ` Robin Murphy
  2021-11-04  8:54 ` dma-direct fixes and cleanups v2 Christoph Hellwig
  10 siblings, 1 reply; 29+ messages in thread
From: Christoph Hellwig @ 2021-10-21  9:06 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Add a helper to check if a potentially blocking operation should
dip into the atomic pools.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f2ec40f5733fc..babf79c16c041 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -151,6 +151,15 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 	return page;
 }
 
+/*
+ * Check if a potentially blocking operation needs to dip into the atomic
+ * pools for the given device/gfp.
+ */
+static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
+{
+	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
+}
+
 static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp)
 {
@@ -229,8 +238,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		 * instead have to dip into the atomic pools.
 		 */
 		if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
-			if (!gfpflags_allow_blocking(gfp) &&
-			    !is_swiotlb_for_alloc(dev))
+			if (dma_direct_use_pool(dev, gfp))
 				return dma_direct_alloc_from_pool(dev, size,
 						dma_handle, gfp);
 			remap = true;
@@ -241,8 +249,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	 * Decrypting memory may block, so allocate the memory from the atomic
 	 * pools if we can't block.
 	 */
-	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
-	    !is_swiotlb_for_alloc(dev))
+	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	/* we always manually zero the memory once we are done */
@@ -359,8 +366,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	struct page *page;
 	void *ret;
 
-	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
-	    !is_swiotlb_for_alloc(dev))
+	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	page = __dma_direct_alloc_pages(dev, size, gfp);
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* Re: dma-direct fixes and cleanups v2
  2021-10-21  9:06 dma-direct fixes and cleanups v2 Christoph Hellwig
                   ` (9 preceding siblings ...)
  2021-10-21  9:06 ` [PATCH 10/10] dma-direct: add a dma_direct_use_pool helper Christoph Hellwig
@ 2021-11-04  8:54 ` Christoph Hellwig
  10 siblings, 0 replies; 29+ messages in thread
From: Christoph Hellwig @ 2021-11-04  8:54 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, David Rientjes

Any comments?

On Thu, Oct 21, 2021 at 11:06:01AM +0200, Christoph Hellwig wrote:
> Hi all,
> 
> Linus complained about the complex flow in dma_direct_alloc, so this
> tries to simplify it a bit, and while I was at it I also made sure that
> unencrypted pages never leak back into the page allocator.
> 
> Changes since v1:
>  - fix a missing return
>  - add a new patch to fix a pre-existing missing unmap
>  - various additional cleanups
>  
> Diffstat:
>  direct.c |  234 +++++++++++++++++++++++++++++++++++++--------------------------
>  1 file changed, 138 insertions(+), 96 deletions(-)
> _______________________________________________
> iommu mailing list
> iommu@lists.linux-foundation.org
> https://lists.linuxfoundation.org/mailman/listinfo/iommu
---end quoted text---
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 01/10] dma-direct: factor out dma_set_{de,en}crypted helpers
  2021-10-21  9:06 ` [PATCH 01/10] dma-direct: factor out dma_set_{de,en}crypted helpers Christoph Hellwig
@ 2021-11-04 12:35   ` Robin Murphy
  0 siblings, 0 replies; 29+ messages in thread
From: Robin Murphy @ 2021-11-04 12:35 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-10-21 10:06, Christoph Hellwig wrote:
> Factor out helpers that make dealing with memory encryption a little less
> cumbersome.

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   kernel/dma/direct.c | 56 ++++++++++++++++++++-------------------------
>   1 file changed, 25 insertions(+), 31 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 4c6c5e0635e34..d4d54af31a341 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
>   		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
>   }
>   
> +static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
> +{
> +	if (!force_dma_unencrypted(dev))
> +		return 0;
> +	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
> +}
> +
> +static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
> +{
> +	if (!force_dma_unencrypted(dev))
> +		return 0;
> +	return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
> +}
> +
>   static void __dma_direct_free_pages(struct device *dev, struct page *page,
>   				    size_t size)
>   {
> @@ -154,7 +168,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   {
>   	struct page *page;
>   	void *ret;
> -	int err;
>   
>   	size = PAGE_ALIGN(size);
>   	if (attrs & DMA_ATTR_NO_WARN)
> @@ -216,12 +229,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   				__builtin_return_address(0));
>   		if (!ret)
>   			goto out_free_pages;
> -		if (force_dma_unencrypted(dev)) {
> -			err = set_memory_decrypted((unsigned long)ret,
> -						   1 << get_order(size));
> -			if (err)
> -				goto out_free_pages;
> -		}
> +		if (dma_set_decrypted(dev, ret, size))
> +			goto out_free_pages;
>   		memset(ret, 0, size);
>   		goto done;
>   	}
> @@ -238,13 +247,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   	}
>   
>   	ret = page_address(page);
> -	if (force_dma_unencrypted(dev)) {
> -		err = set_memory_decrypted((unsigned long)ret,
> -					   1 << get_order(size));
> -		if (err)
> -			goto out_free_pages;
> -	}
> -
> +	if (dma_set_decrypted(dev, ret, size))
> +		goto out_free_pages;
>   	memset(ret, 0, size);
>   
>   	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
> @@ -259,13 +263,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   	return ret;
>   
>   out_encrypt_pages:
> -	if (force_dma_unencrypted(dev)) {
> -		err = set_memory_encrypted((unsigned long)page_address(page),
> -					   1 << get_order(size));
> -		/* If memory cannot be re-encrypted, it must be leaked */
> -		if (err)
> -			return NULL;
> -	}
> +	/* If memory cannot be re-encrypted, it must be leaked */
> +	if (dma_set_encrypted(dev, page_address(page), size))
> +		return NULL;
>   out_free_pages:
>   	__dma_direct_free_pages(dev, page, size);
>   	return NULL;
> @@ -304,8 +304,7 @@ void dma_direct_free(struct device *dev, size_t size,
>   	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
>   		return;
>   
> -	if (force_dma_unencrypted(dev))
> -		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
> +	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
>   
>   	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
>   		vunmap(cpu_addr);
> @@ -341,11 +340,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
>   	}
>   
>   	ret = page_address(page);
> -	if (force_dma_unencrypted(dev)) {
> -		if (set_memory_decrypted((unsigned long)ret,
> -				1 << get_order(size)))
> -			goto out_free_pages;
> -	}
> +	if (dma_set_decrypted(dev, ret, size))
> +		goto out_free_pages;
>   	memset(ret, 0, size);
>   	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
>   	return page;
> @@ -366,9 +362,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
>   	    dma_free_from_pool(dev, vaddr, size))
>   		return;
>   
> -	if (force_dma_unencrypted(dev))
> -		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
> -
> +	dma_set_encrypted(dev, vaddr, 1 << page_order);
>   	__dma_direct_free_pages(dev, page, size);
>   }
>   
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 02/10] dma-direct: unmapped remapped pages when dma_set_decrypted
  2021-10-21  9:06 ` [PATCH 02/10] dma-direct: unmapped remapped pages when dma_set_decrypted Christoph Hellwig
@ 2021-11-04 12:35   ` Robin Murphy
  2021-11-09 14:10     ` Christoph Hellwig
  0 siblings, 1 reply; 29+ messages in thread
From: Robin Murphy @ 2021-11-04 12:35 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-10-21 10:06, Christoph Hellwig wrote:
> When dma_set_decrypted fails for the remapping case in dma_direct_alloc
> we also need to unmap the pages before freeing them.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   kernel/dma/direct.c | 5 ++++-
>   1 file changed, 4 insertions(+), 1 deletion(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index d4d54af31a341..2fef8dd401fe9 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -230,7 +230,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   		if (!ret)
>   			goto out_free_pages;
>   		if (dma_set_decrypted(dev, ret, size))

I was going to say just stick the vunmap() in here to avoid adding yet 
more messy conditionals, but one rabbit hole later... Given that the 
dma_pgprot() we've just passed to dma_common_pages_remap() already adds 
in pgprot_decrypted, why is this even here at all?

Robin.

> -			goto out_free_pages;
> +			goto out_unmap_pages;
>   		memset(ret, 0, size);
>   		goto done;
>   	}
> @@ -266,6 +266,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   	/* If memory cannot be re-encrypted, it must be leaked */
>   	if (dma_set_encrypted(dev, page_address(page), size))
>   		return NULL;
> +out_unmap_pages:
> +	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(ret))
> +		vunmap(ret);
>   out_free_pages:
>   	__dma_direct_free_pages(dev, page, size);
>   	return NULL;
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 03/10] dma-direct: leak memory that can't be re-encrypted
  2021-10-21  9:06 ` [PATCH 03/10] dma-direct: leak memory that can't be re-encrypted Christoph Hellwig
@ 2021-11-04 12:35   ` Robin Murphy
  2021-11-09 14:19     ` Christoph Hellwig
  0 siblings, 1 reply; 29+ messages in thread
From: Robin Murphy @ 2021-11-04 12:35 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-10-21 10:06, Christoph Hellwig wrote:
> We must never let unencrypted memory go back into the general page pool.
> So if we fail to set it back to encrypted when freeing DMA memory, leak
> the memory instead and warn the user.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   kernel/dma/direct.c | 18 ++++++++++++++----
>   1 file changed, 14 insertions(+), 4 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 2fef8dd401fe9..60cb75aa6778e 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -263,9 +263,11 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   	return ret;
>   
>   out_encrypt_pages:
> -	/* If memory cannot be re-encrypted, it must be leaked */
> -	if (dma_set_encrypted(dev, page_address(page), size))
> +	if (dma_set_encrypted(dev, page_address(page), size)) {
> +		pr_warn_ratelimited(
> +			"leaking DMA memory that can't be re-encrypted\n");

Given that this is consistent for all uses of dma_set_encrypted(), seems 
like it should be factored into the helper itself.

Robin.

>   		return NULL;
> +	}
>   out_unmap_pages:
>   	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(ret))
>   		vunmap(ret);
> @@ -307,7 +309,11 @@ void dma_direct_free(struct device *dev, size_t size,
>   	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
>   		return;
>   
> -	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
> +	if (dma_set_encrypted(dev, cpu_addr, 1 << page_order)) {
> +		pr_warn_ratelimited(
> +			"leaking DMA memory that can't be re-encrypted\n");
> +		return;
> +	}
>   
>   	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
>   		vunmap(cpu_addr);
> @@ -365,7 +371,11 @@ void dma_direct_free_pages(struct device *dev, size_t size,
>   	    dma_free_from_pool(dev, vaddr, size))
>   		return;
>   
> -	dma_set_encrypted(dev, vaddr, 1 << page_order);
> +	if (dma_set_encrypted(dev, vaddr, 1 << page_order)) {
> +		pr_warn_ratelimited(
> +			"leaking DMA memory that can't be re-encrypted\n");
> +		return;
> +	}
>   	__dma_direct_free_pages(dev, page, size);
>   }
>   
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 04/10] dma-direct: clean up the remapping checks in dma_direct_alloc
  2021-10-21  9:06 ` [PATCH 04/10] dma-direct: clean up the remapping checks in dma_direct_alloc Christoph Hellwig
@ 2021-11-04 12:35   ` Robin Murphy
  2021-11-09 14:23     ` Christoph Hellwig
  0 siblings, 1 reply; 29+ messages in thread
From: Robin Murphy @ 2021-11-04 12:35 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-10-21 10:06, Christoph Hellwig wrote:
> Add a local variable to track if we want to remap the returned address
> using vmap and use that to simplify the code flow.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   kernel/dma/direct.c | 47 +++++++++++++++++++++++----------------------
>   1 file changed, 24 insertions(+), 23 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 60cb75aa6778e..a6b6fe72af4d1 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -166,6 +166,7 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
>   void *dma_direct_alloc(struct device *dev, size_t size,
>   		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
>   {
> +	bool remap = false;

How about also adding a "bool set_uncached = false"...

>   	struct page *page;
>   	void *ret;
>   
> @@ -217,9 +218,23 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   	if (!page)
>   		return NULL;
>   
> -	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
> -	     !dev_is_dma_coherent(dev)) ||
> -	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
> +	if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
> +		remap = true;
> +	} else if (PageHighMem(page)) {
> +		/*
> +		 * Depending on the cma= arguments and per-arch setup,
> +		 * dma_alloc_contiguous could return highmem pages.
> +		 * Without remapping there is no way to return them here, so
> +		 * log an error and fail.
> +		 */
> +		if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
> +			dev_info(dev, "Rejecting highmem page from CMA.\n");
> +			goto out_free_pages;
> +		}
> +		remap = true;
> +	}

...then "else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
		set_uncached = true;"...

> +
> +	if (remap) {
>   		/* remove any dirty cache lines on the kernel alias */
>   		arch_dma_prep_coherent(page, size);
>   
> @@ -229,36 +244,22 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   				__builtin_return_address(0));
>   		if (!ret)
>   			goto out_free_pages;
> -		if (dma_set_decrypted(dev, ret, size))
> -			goto out_unmap_pages;
> -		memset(ret, 0, size);
> -		goto done;
> -	}
> -
> -	if (PageHighMem(page)) {
> -		/*
> -		 * Depending on the cma= arguments and per-arch setup
> -		 * dma_alloc_contiguous could return highmem pages.
> -		 * Without remapping there is no way to return them here,
> -		 * so log an error and fail.
> -		 */
> -		dev_info(dev, "Rejecting highmem page from CMA.\n");
> -		goto out_free_pages;
> +	} else {
> +		ret = page_address(page);

As before, I'm thinking that dma_set_decrypted() probably belongs in here.

>   	}
>   
> -	ret = page_address(page);
>   	if (dma_set_decrypted(dev, ret, size))
> -		goto out_free_pages;
> +		goto out_unmap_pages;
>   	memset(ret, 0, size);
>   
> -	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
> -	    !dev_is_dma_coherent(dev)) {
> +	if (!dev_is_dma_coherent(dev) && !remap &&
> +	    IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED)) {

...then from earlier we'd have just a nice "if (set_uncached)" here?

>   		arch_dma_prep_coherent(page, size);
>   		ret = arch_dma_set_uncached(ret, size);
>   		if (IS_ERR(ret))
>   			goto out_encrypt_pages;

 From a quick Kconfig survey, this is a purely theoretical exercise in 
dead code generation. Let's just be pragmatic, stick in a
"BUILD_BUG_ON(IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) && 
IS_ENABLED(CONFIG_ARCH_HAS_MEM_ENCRYPT));" and leave this re-encryption 
case for theoretical future arch maintainers to worry about.

All that said, I'm also now wondering why the arch_set_dma_uncached() 
call is down here in the first place. If we could do it at as an else 
case of the remapping stage (given that it's a semantically equivalent 
operation), the complexity inherently falls away.

>   	}
> -done:
> +
>   	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
>   	return ret;
>   
> 

If the "out_unmap_pages" step is even still necessary, I think the 
condition there should now simplify down to "if (remap)" as well.

Robin.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 06/10] dma-direct: refactor the !coherent checks in dma_direct_alloc
  2021-10-21  9:06 ` [PATCH 06/10] dma-direct: refactor the !coherent checks in dma_direct_alloc Christoph Hellwig
@ 2021-11-04 12:36   ` Robin Murphy
  2021-11-09 14:27     ` Christoph Hellwig
  0 siblings, 1 reply; 29+ messages in thread
From: Robin Murphy @ 2021-11-04 12:36 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-10-21 10:06, Christoph Hellwig wrote:
> Add a big central !dev_is_dma_coherent(dev) block to deal with as much
> of the uncached allocation schemes and document the schemes a bit
> better.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   kernel/dma/direct.c | 58 ++++++++++++++++++++++++++++-----------------
>   1 file changed, 36 insertions(+), 22 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 4ffdb524942a1..d66f37f34ba71 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -196,29 +196,46 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
>   		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
>   
> -	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
> -	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
> -	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
> -	    !dev_is_dma_coherent(dev) &&
> -	    !is_swiotlb_for_alloc(dev))
> -		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
> +	if (!dev_is_dma_coherent(dev)) {
> +		/*
> +		 * Fallback to the arch handler if it exists.  This should
> +		 * eventually go away.
> +		 */
> +		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
> +		    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
> +		    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
> +		    !is_swiotlb_for_alloc(dev))
> +			return arch_dma_alloc(dev, size, dma_handle, gfp,
> +					      attrs);
>   
> -	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
> -	    !dev_is_dma_coherent(dev))
> -		return dma_alloc_from_global_coherent(dev, size, dma_handle);
> +		/*
> +		 * If there is a global pool, always allocate from it for
> +		 * non-coherent devices.
> +		 */
> +		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
> +			return dma_alloc_from_global_coherent(dev, size,
> +					dma_handle);
> +
> +		/*
> +		 * Otherwise remap if the architecture is asking for it.  But
> +		 * given that remapping memory is a blocking operation we'll
> +		 * instead have to dip into the atomic pools.
> +		 */
> +		if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
> +			if (!gfpflags_allow_blocking(gfp) &&
> +			    !is_swiotlb_for_alloc(dev))
> +				return dma_direct_alloc_from_pool(dev, size,
> +						dma_handle, gfp);
> +			remap = true;

How about:
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);

		if (remap && ...)

for a bit less indentation? FWIW I reckon it's slightly more obvious 
that way round.

Robin.

> +		}
> +	}
>   
>   	/*
> -	 * Remapping or decrypting memory may block. If either is required and
> -	 * we can't block, allocate the memory from the atomic pools.
> -	 * If restricted DMA (i.e., is_swiotlb_for_alloc) is required, one must
> -	 * set up another device coherent pool by shared-dma-pool and use
> -	 * dma_alloc_from_dev_coherent instead.
> +	 * Decrypting memory may block, so allocate the memory from the atomic
> +	 * pools if we can't block.
>   	 */
>   	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
> -	    !gfpflags_allow_blocking(gfp) &&
> -	    (force_dma_unencrypted(dev) ||
> -	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
> -	      !dev_is_dma_coherent(dev))) &&
> +	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
>   	    !is_swiotlb_for_alloc(dev))
>   		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
>   
> @@ -226,10 +243,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
>   	if (!page)
>   		return NULL;
> -
> -	if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
> -		remap = true;
> -	} else if (PageHighMem(page)) {
> +	if (PageHighMem(page)) {
>   		/*
>   		 * Depending on the cma= arguments and per-arch setup,
>   		 * dma_alloc_contiguous could return highmem pages.
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 05/10] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations
  2021-10-21  9:06 ` [PATCH 05/10] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations Christoph Hellwig
@ 2021-11-04 12:36   ` Robin Murphy
  2021-11-09 14:25     ` Christoph Hellwig
  0 siblings, 1 reply; 29+ messages in thread
From: Robin Murphy @ 2021-11-04 12:36 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-10-21 10:06, Christoph Hellwig wrote:
> Split the code for DMA_ATTR_NO_KERNEL_MAPPING allocations into a separate
> helper to make dma_direct_alloc a little more readable.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> Acked-by: David Rientjes <rientjes@google.com>
> ---
>   kernel/dma/direct.c | 31 ++++++++++++++++++++-----------
>   1 file changed, 20 insertions(+), 11 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index a6b6fe72af4d1..4ffdb524942a1 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -163,6 +163,24 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
>   	return ret;
>   }
>   
> +static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
> +		dma_addr_t *dma_handle, gfp_t gfp)
> +{
> +	struct page *page;
> +
> +	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
> +	if (!page)
> +		return NULL;
> +
> +	/* remove any dirty cache lines on the kernel alias */
> +	if (!PageHighMem(page))
> +		arch_dma_prep_coherent(page, size);
> +
> +	/* return the page pointer as the opaque cookie */
> +	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
> +	return page;
> +}
> +
>   void *dma_direct_alloc(struct device *dev, size_t size,
>   		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
>   {
> @@ -175,17 +193,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   		gfp |= __GFP_NOWARN;
>   
>   	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
> -	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
> -		page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
> -		if (!page)
> -			return NULL;
> -		/* remove any dirty cache lines on the kernel alias */
> -		if (!PageHighMem(page))
> -			arch_dma_prep_coherent(page, size);
> -		*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
> -		/* return the page pointer as the opaque cookie */
> -		return page;
> -	}
> +	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))

Hmm, does force_dma_unencrypted() actually matter if the caller doesn't 
want to access the buffer itself? Presumably any subsequent mmap() to 
userspace would still do the right thing?

Robin.

> +		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
>   
>   	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
>   	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 07/10] dma-direct: warn if there is no pool for force unencrypted allocations
  2021-10-21  9:06 ` [PATCH 07/10] dma-direct: warn if there is no pool for force unencrypted allocations Christoph Hellwig
@ 2021-11-04 12:36   ` Robin Murphy
  0 siblings, 0 replies; 29+ messages in thread
From: Robin Murphy @ 2021-11-04 12:36 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-10-21 10:06, Christoph Hellwig wrote:
> Instead of blindly running into a blocking operation for a non-blocking gfp,
> return NULL and spew an error.  Note that Kconfig prevents this for all
> currently relevant platforms, and this is just a debug check.

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   kernel/dma/direct.c | 9 +++++----
>   1 file changed, 5 insertions(+), 4 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index d66f37f34ba71..680fe10410645 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -154,6 +154,9 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
>   	u64 phys_mask;
>   	void *ret;
>   
> +	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
> +		return NULL;
> +
>   	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
>   					   &phys_mask);
>   	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
> @@ -234,8 +237,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   	 * Decrypting memory may block, so allocate the memory from the atomic
>   	 * pools if we can't block.
>   	 */
> -	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
> -	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
> +	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
>   	    !is_swiotlb_for_alloc(dev))
>   		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
>   
> @@ -353,8 +355,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
>   	struct page *page;
>   	void *ret;
>   
> -	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
> -	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
> +	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
>   	    !is_swiotlb_for_alloc(dev))
>   		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
>   
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 08/10] dma-direct: drop two CONFIG_DMA_RESTRICTED_POOL conditionals
  2021-10-21  9:06 ` [PATCH 08/10] dma-direct: drop two CONFIG_DMA_RESTRICTED_POOL conditionals Christoph Hellwig
@ 2021-11-04 12:37   ` Robin Murphy
  0 siblings, 0 replies; 29+ messages in thread
From: Robin Murphy @ 2021-11-04 12:37 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-10-21 10:06, Christoph Hellwig wrote:
> swiotlb_alloc and swiotlb_free are properly stubbed out if
> CONFIG_DMA_RESTRICTED_POOL is not set, so skip the extra checks.

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   kernel/dma/direct.c | 6 ++----
>   1 file changed, 2 insertions(+), 4 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 680fe10410645..f4ac2e1cdf469 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -92,8 +92,7 @@ static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
>   static void __dma_direct_free_pages(struct device *dev, struct page *page,
>   				    size_t size)
>   {
> -	if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
> -	    swiotlb_free(dev, page, size))
> +	if (swiotlb_free(dev, page, size))
>   		return;
>   	dma_free_contiguous(dev, page, size);
>   }
> @@ -109,8 +108,7 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
>   
>   	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
>   					   &phys_limit);
> -	if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
> -	    is_swiotlb_for_alloc(dev)) {
> +	if (is_swiotlb_for_alloc(dev)) {
>   		page = swiotlb_alloc(dev, size);
>   		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
>   			__dma_direct_free_pages(dev, page, size);
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 09/10] dma-direct: factor the swiotlb code out of __dma_direct_alloc_pages
  2021-10-21  9:06 ` [PATCH 09/10] dma-direct: factor the swiotlb code out of __dma_direct_alloc_pages Christoph Hellwig
@ 2021-11-04 12:37   ` Robin Murphy
  0 siblings, 0 replies; 29+ messages in thread
From: Robin Murphy @ 2021-11-04 12:37 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-10-21 10:06, Christoph Hellwig wrote:
> Add a new helper to deal with the swiotlb case.  This keeps the code
> nicely bundled and removes the unnecessary call to
> dma_direct_optimal_gfp_mask for the swiotlb case.

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   kernel/dma/direct.c | 24 +++++++++++++++---------
>   1 file changed, 15 insertions(+), 9 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index f4ac2e1cdf469..f2ec40f5733fc 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -97,6 +97,18 @@ static void __dma_direct_free_pages(struct device *dev, struct page *page,
>   	dma_free_contiguous(dev, page, size);
>   }
>   
> +static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
> +{
> +	struct page *page = swiotlb_alloc(dev, size);
> +
> +	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
> +		swiotlb_free(dev, page, size);
> +		return NULL;
> +	}
> +
> +	return page;
> +}
> +
>   static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
>   		gfp_t gfp)
>   {
> @@ -106,17 +118,11 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
>   
>   	WARN_ON_ONCE(!PAGE_ALIGNED(size));
>   
> +	if (is_swiotlb_for_alloc(dev))
> +		return dma_direct_alloc_swiotlb(dev, size);
> +
>   	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
>   					   &phys_limit);
> -	if (is_swiotlb_for_alloc(dev)) {
> -		page = swiotlb_alloc(dev, size);
> -		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
> -			__dma_direct_free_pages(dev, page, size);
> -			return NULL;
> -		}
> -		return page;
> -	}
> -
>   	page = dma_alloc_contiguous(dev, size, gfp);
>   	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
>   		dma_free_contiguous(dev, page, size);
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 10/10] dma-direct: add a dma_direct_use_pool helper
  2021-10-21  9:06 ` [PATCH 10/10] dma-direct: add a dma_direct_use_pool helper Christoph Hellwig
@ 2021-11-04 12:37   ` Robin Murphy
  0 siblings, 0 replies; 29+ messages in thread
From: Robin Murphy @ 2021-11-04 12:37 UTC (permalink / raw)
  To: Christoph Hellwig, iommu; +Cc: David Rientjes

On 2021-10-21 10:06, Christoph Hellwig wrote:
> Add a helper to check if a potentially blocking operation should
> dip into the atomic pools.

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   kernel/dma/direct.c | 18 ++++++++++++------
>   1 file changed, 12 insertions(+), 6 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index f2ec40f5733fc..babf79c16c041 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -151,6 +151,15 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
>   	return page;
>   }
>   
> +/*
> + * Check if a potentially blocking operation needs to dip into the atomic
> + * pools for the given device/gfp.
> + */
> +static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
> +{
> +	return gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
> +}
> +
>   static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
>   		dma_addr_t *dma_handle, gfp_t gfp)
>   {
> @@ -229,8 +238,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   		 * instead have to dip into the atomic pools.
>   		 */
>   		if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
> -			if (!gfpflags_allow_blocking(gfp) &&
> -			    !is_swiotlb_for_alloc(dev))
> +			if (dma_direct_use_pool(dev, gfp))
>   				return dma_direct_alloc_from_pool(dev, size,
>   						dma_handle, gfp);
>   			remap = true;
> @@ -241,8 +249,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   	 * Decrypting memory may block, so allocate the memory from the atomic
>   	 * pools if we can't block.
>   	 */
> -	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
> -	    !is_swiotlb_for_alloc(dev))
> +	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
>   		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
>   
>   	/* we always manually zero the memory once we are done */
> @@ -359,8 +366,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
>   	struct page *page;
>   	void *ret;
>   
> -	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
> -	    !is_swiotlb_for_alloc(dev))
> +	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
>   		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
>   
>   	page = __dma_direct_alloc_pages(dev, size, gfp);
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 02/10] dma-direct: unmapped remapped pages when dma_set_decrypted
  2021-11-04 12:35   ` Robin Murphy
@ 2021-11-09 14:10     ` Christoph Hellwig
  2021-11-09 14:27       ` Robin Murphy
  0 siblings, 1 reply; 29+ messages in thread
From: Christoph Hellwig @ 2021-11-09 14:10 UTC (permalink / raw)
  To: Robin Murphy; +Cc: iommu, Christoph Hellwig, David Rientjes

On Thu, Nov 04, 2021 at 12:35:41PM +0000, Robin Murphy wrote:
> On 2021-10-21 10:06, Christoph Hellwig wrote:
>> When dma_set_decrypted fails for the remapping case in dma_direct_alloc
>> we also need to unmap the pages before freeing them.
>>
>> Signed-off-by: Christoph Hellwig <hch@lst.de>
>> ---
>>   kernel/dma/direct.c | 5 ++++-
>>   1 file changed, 4 insertions(+), 1 deletion(-)
>>
>> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
>> index d4d54af31a341..2fef8dd401fe9 100644
>> --- a/kernel/dma/direct.c
>> +++ b/kernel/dma/direct.c
>> @@ -230,7 +230,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>>   		if (!ret)
>>   			goto out_free_pages;
>>   		if (dma_set_decrypted(dev, ret, size))
>
> I was going to say just stick the vunmap() in here to avoid adding yet more 
> messy conditionals, but one rabbit hole later... Given that the 
> dma_pgprot() we've just passed to dma_common_pages_remap() already adds in 
> pgprot_decrypted, why is this even here at all?

Good point.  This combination is pretty much untested anyway as the
architectures that support memory encryption never remap, but yes
I think the best is if it gets removed.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 03/10] dma-direct: leak memory that can't be re-encrypted
  2021-11-04 12:35   ` Robin Murphy
@ 2021-11-09 14:19     ` Christoph Hellwig
  0 siblings, 0 replies; 29+ messages in thread
From: Christoph Hellwig @ 2021-11-09 14:19 UTC (permalink / raw)
  To: Robin Murphy; +Cc: iommu, Christoph Hellwig, David Rientjes

On Thu, Nov 04, 2021 at 12:35:49PM +0000, Robin Murphy wrote:
> Given that this is consistent for all uses of dma_set_encrypted(), seems 
> like it should be factored into the helper itself.

Done.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 04/10] dma-direct: clean up the remapping checks in dma_direct_alloc
  2021-11-04 12:35   ` Robin Murphy
@ 2021-11-09 14:23     ` Christoph Hellwig
  0 siblings, 0 replies; 29+ messages in thread
From: Christoph Hellwig @ 2021-11-09 14:23 UTC (permalink / raw)
  To: Robin Murphy; +Cc: iommu, Christoph Hellwig, David Rientjes

On Thu, Nov 04, 2021 at 12:35:59PM +0000, Robin Murphy wrote:
>> @@ -166,6 +166,7 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
>>   void *dma_direct_alloc(struct device *dev, size_t size,
>>   		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
>>   {
>> +	bool remap = false;
>
> How about also adding a "bool set_uncached = false"...

Done.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 05/10] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations
  2021-11-04 12:36   ` Robin Murphy
@ 2021-11-09 14:25     ` Christoph Hellwig
  2021-11-09 14:39       ` Robin Murphy
  0 siblings, 1 reply; 29+ messages in thread
From: Christoph Hellwig @ 2021-11-09 14:25 UTC (permalink / raw)
  To: Robin Murphy; +Cc: iommu, Christoph Hellwig, David Rientjes

On Thu, Nov 04, 2021 at 12:36:16PM +0000, Robin Murphy wrote:
>> -		*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
>> -		/* return the page pointer as the opaque cookie */
>> -		return page;
>> -	}
>> +	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
>
> Hmm, does force_dma_unencrypted() actually matter if the caller doesn't 
> want to access the buffer itself? Presumably any subsequent mmap() to 
> userspace would still do the right thing?

Well, force_dma_unencrypted is a bit misnamed I think.  It is mostly
used to force bounce buffering for protected guest schemes.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 06/10] dma-direct: refactor the !coherent checks in dma_direct_alloc
  2021-11-04 12:36   ` Robin Murphy
@ 2021-11-09 14:27     ` Christoph Hellwig
  0 siblings, 0 replies; 29+ messages in thread
From: Christoph Hellwig @ 2021-11-09 14:27 UTC (permalink / raw)
  To: Robin Murphy; +Cc: iommu, Christoph Hellwig, David Rientjes

On Thu, Nov 04, 2021 at 12:36:08PM +0000, Robin Murphy wrote:
> How about:
> 		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
>
> 		if (remap && ...)
>
> for a bit less indentation? FWIW I reckon it's slightly more obvious that 
> way round.

Done.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 02/10] dma-direct: unmap remapped pages when dma_set_decrypted fails
  2021-11-09 14:10     ` Christoph Hellwig
@ 2021-11-09 14:27       ` Robin Murphy
  0 siblings, 0 replies; 29+ messages in thread
From: Robin Murphy @ 2021-11-09 14:27 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: iommu, David Rientjes

On 2021-11-09 14:10, Christoph Hellwig wrote:
> On Thu, Nov 04, 2021 at 12:35:41PM +0000, Robin Murphy wrote:
>> On 2021-10-21 10:06, Christoph Hellwig wrote:
>>> When dma_set_decrypted fails for the remapping case in dma_direct_alloc
>>> we also need to unmap the pages before freeing them.
>>>
>>> Signed-off-by: Christoph Hellwig <hch@lst.de>
>>> ---
>>>    kernel/dma/direct.c | 5 ++++-
>>>    1 file changed, 4 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
>>> index d4d54af31a341..2fef8dd401fe9 100644
>>> --- a/kernel/dma/direct.c
>>> +++ b/kernel/dma/direct.c
>>> @@ -230,7 +230,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>>>    		if (!ret)
>>>    			goto out_free_pages;
>>>    		if (dma_set_decrypted(dev, ret, size))
>>
>> I was going to say just stick the vunmap() in here to avoid adding yet more
>> messy conditionals, but one rabbit hole later... Given that the
>> dma_pgprot() we've just passed to dma_common_pages_remap() already adds in
>> pgprot_decrypted, why is this even here at all?
> 
> Good point.  This combination is pretty much untested anyway as the
> architectures that support memory encryption never remap, but yes
> I think the best is if gets removed.

Right, I carried on looking out of curiosity, and of the two other 
architectures, it would definitely be broken on PowerPC whose 
set_memory_decrypted() assumes it can call virt_to_phys() on the given 
VA, while s390 appears to pass the VA straight to its ultravisor call so 
I'm not sure what the deal is there.

I suspect we might run into this properly on arm64, where some of the 
various protected virtualisation prototypes are currently banking on 
hooking into the memory encryption APIs, but even then I'd rather fix 
said APIs properly such that vmap() with pgprot_decrypted() always ends 
up doing the right thing - as it stands, having "generic" functionality 
which may or may not actually do what you think it should is terrible.

Cheers,
Robin.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 05/10] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations
  2021-11-09 14:25     ` Christoph Hellwig
@ 2021-11-09 14:39       ` Robin Murphy
  0 siblings, 0 replies; 29+ messages in thread
From: Robin Murphy @ 2021-11-09 14:39 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: iommu, David Rientjes

On 2021-11-09 14:25, Christoph Hellwig wrote:
> On Thu, Nov 04, 2021 at 12:36:16PM +0000, Robin Murphy wrote:
>>> -		*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
>>> -		/* return the page pointer as the opaque cookie */
>>> -		return page;
>>> -	}
>>> +	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
>>
>> Hmm, does force_dma_unencrypted() actually matter if the caller doesn't
>> want to access the buffer itself? Presumably any subsequent mmap() to
>> userspace would still do the right thing?
> 
> Well, force_dma_unencrypted is a bit misnamed I think.  It is mostly
> used to force bounce buffering for protected guest schemes.

Ah, that'll be what that small nagging doubt was at the time :)

Furthermore, implicit in the fact that the only comment I had was just 
musing on the existing code being moved, feel free to have a

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

for the refactor as-is.

Cheers,
Robin.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 29+ messages in thread

end of thread, other threads:[~2021-11-09 14:39 UTC | newest]

Thread overview: 29+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-10-21  9:06 dma-direct fixes and cleanups v2 Christoph Hellwig
2021-10-21  9:06 ` [PATCH 01/10] dma-direct: factor out dma_set_{de,en}crypted helpers Christoph Hellwig
2021-11-04 12:35   ` Robin Murphy
2021-10-21  9:06 ` [PATCH 02/10] dma-direct: unmap remapped pages when dma_set_decrypted fails Christoph Hellwig
2021-11-04 12:35   ` Robin Murphy
2021-11-09 14:10     ` Christoph Hellwig
2021-11-09 14:27       ` Robin Murphy
2021-10-21  9:06 ` [PATCH 03/10] dma-direct: leak memory that can't be re-encrypted Christoph Hellwig
2021-11-04 12:35   ` Robin Murphy
2021-11-09 14:19     ` Christoph Hellwig
2021-10-21  9:06 ` [PATCH 04/10] dma-direct: clean up the remapping checks in dma_direct_alloc Christoph Hellwig
2021-11-04 12:35   ` Robin Murphy
2021-11-09 14:23     ` Christoph Hellwig
2021-10-21  9:06 ` [PATCH 05/10] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations Christoph Hellwig
2021-11-04 12:36   ` Robin Murphy
2021-11-09 14:25     ` Christoph Hellwig
2021-11-09 14:39       ` Robin Murphy
2021-10-21  9:06 ` [PATCH 06/10] dma-direct: refactor the !coherent checks in dma_direct_alloc Christoph Hellwig
2021-11-04 12:36   ` Robin Murphy
2021-11-09 14:27     ` Christoph Hellwig
2021-10-21  9:06 ` [PATCH 07/10] dma-direct: warn if there is no pool for force unencrypted allocations Christoph Hellwig
2021-11-04 12:36   ` Robin Murphy
2021-10-21  9:06 ` [PATCH 08/10] dma-direct: drop two CONFIG_DMA_RESTRICTED_POOL conditionals Christoph Hellwig
2021-11-04 12:37   ` Robin Murphy
2021-10-21  9:06 ` [PATCH 09/10] dma-direct: factor the swiotlb code out of __dma_direct_alloc_pages Christoph Hellwig
2021-11-04 12:37   ` Robin Murphy
2021-10-21  9:06 ` [PATCH 10/10] dma-direct: add a dma_direct_use_pool helper Christoph Hellwig
2021-11-04 12:37   ` Robin Murphy
2021-11-04  8:54 ` dma-direct fixes and cleanups v2 Christoph Hellwig

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.