* dma-direct cleanups
@ 2021-10-19 12:29 Christoph Hellwig
  2021-10-19 12:29 ` [PATCH 1/4] dma-direct: factor out dma_set_{de,en}crypted helpers Christoph Hellwig
                   ` (3 more replies)
  0 siblings, 4 replies; 10+ messages in thread
From: Christoph Hellwig @ 2021-10-19 12:29 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, id Rientjes

Hi all,

Linus complained about the complex flow in dma_direct_alloc, so this
series tries to simplify it a bit.  While I was at it, I also made sure
that unencrypted pages never leak back into the page allocator.

Diffstat
 direct.c |  133 +++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 74 insertions(+), 59 deletions(-)
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH 1/4] dma-direct: factor out dma_set_{de,en}crypted helpers
  2021-10-19 12:29 dma-direct cleanups Christoph Hellwig
@ 2021-10-19 12:29 ` Christoph Hellwig
  2021-10-19 19:54   ` David Rientjes via iommu
  2021-10-19 12:29 ` [PATCH 2/4] dma-direct: leak memory that can't be re-encrypted Christoph Hellwig
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 10+ messages in thread
From: Christoph Hellwig @ 2021-10-19 12:29 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, id Rientjes

Factor out helpers that make dealing with memory encryption a little less
cumbersome.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 55 +++++++++++++++++++++------------------------
 1 file changed, 25 insertions(+), 30 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4c6c5e0635e34..96f02248e7fa2 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
+static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
 				    size_t size)
 {
@@ -216,12 +230,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
-		if (force_dma_unencrypted(dev)) {
-			err = set_memory_decrypted((unsigned long)ret,
-						   1 << get_order(size));
-			if (err)
-				goto out_free_pages;
-		}
+		err = dma_set_decrypted(dev, ret, size);
 		memset(ret, 0, size);
 		goto done;
 	}
@@ -238,13 +247,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
-		err = set_memory_decrypted((unsigned long)ret,
-					   1 << get_order(size));
-		if (err)
-			goto out_free_pages;
-	}
-
+	err = dma_set_decrypted(dev, ret, size);
+	if (err)
+		goto out_free_pages;
 	memset(ret, 0, size);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
@@ -259,13 +264,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	return ret;
 
 out_encrypt_pages:
-	if (force_dma_unencrypted(dev)) {
-		err = set_memory_encrypted((unsigned long)page_address(page),
-					   1 << get_order(size));
-		/* If memory cannot be re-encrypted, it must be leaked */
-		if (err)
-			return NULL;
-	}
+	/* If memory cannot be re-encrypted, it must be leaked */
+	if (dma_set_encrypted(dev, page_address(page), size))
+		return NULL;
 out_free_pages:
 	__dma_direct_free_pages(dev, page, size);
 	return NULL;
@@ -304,8 +305,7 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -341,11 +341,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
-		if (set_memory_decrypted((unsigned long)ret,
-				1 << get_order(size)))
-			goto out_free_pages;
-	}
+	if (dma_set_decrypted(dev, ret, size))
+		goto out_free_pages;
 	memset(ret, 0, size);
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return page;
@@ -366,9 +363,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, vaddr, size))
 		return;
 
-	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
-
+	dma_set_encrypted(dev, vaddr, 1 << page_order);
 	__dma_direct_free_pages(dev, page, size);
 }
 
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH 2/4] dma-direct: leak memory that can't be re-encrypted
  2021-10-19 12:29 dma-direct cleanups Christoph Hellwig
  2021-10-19 12:29 ` [PATCH 1/4] dma-direct: factor out dma_set_{de,en}crypted helpers Christoph Hellwig
@ 2021-10-19 12:29 ` Christoph Hellwig
  2021-10-19 19:56   ` David Rientjes via iommu
  2021-10-19 12:29 ` [PATCH 3/4] dma-direct: clean up the remapping checks in dma_direct_alloc Christoph Hellwig
  2021-10-19 12:29 ` [PATCH 4/4] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations Christoph Hellwig
  3 siblings, 1 reply; 10+ messages in thread
From: Christoph Hellwig @ 2021-10-19 12:29 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, id Rientjes

We must never let unencrypted memory go back into the general page pool.
So if we fail to set it back to encrypted when freeing DMA memory, leak
the memory instead and warn the user.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 96f02248e7fa2..6673f7aebf787 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -264,9 +264,11 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	return ret;
 
 out_encrypt_pages:
-	/* If memory cannot be re-encrypted, it must be leaked */
-	if (dma_set_encrypted(dev, page_address(page), size))
+	if (dma_set_encrypted(dev, page_address(page), size)) {
+		pr_warn_ratelimited(
+			"leaking DMA memory that can't be re-encrypted\n");
 		return NULL;
+	}
 out_free_pages:
 	__dma_direct_free_pages(dev, page, size);
 	return NULL;
@@ -305,7 +307,11 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
+	if (dma_set_encrypted(dev, cpu_addr, 1 << page_order)) {
+		pr_warn_ratelimited(
+			"leaking DMA memory that can't be re-encrypted\n");
+		return;
+	}
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -363,7 +369,10 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, vaddr, size))
 		return;
 
-	dma_set_encrypted(dev, vaddr, 1 << page_order);
+	if (dma_set_encrypted(dev, vaddr, 1 << page_order)) {
+		pr_warn_ratelimited(
+			"leaking DMA memory that can't be re-encrypted\n");
+	}
 	__dma_direct_free_pages(dev, page, size);
 }
 
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH 3/4] dma-direct: clean up the remapping checks in dma_direct_alloc
  2021-10-19 12:29 dma-direct cleanups Christoph Hellwig
  2021-10-19 12:29 ` [PATCH 1/4] dma-direct: factor out dma_set_{de,en}crypted helpers Christoph Hellwig
  2021-10-19 12:29 ` [PATCH 2/4] dma-direct: leak memory that can't be re-encrypted Christoph Hellwig
@ 2021-10-19 12:29 ` Christoph Hellwig
  2021-10-19 12:29 ` [PATCH 4/4] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations Christoph Hellwig
  3 siblings, 0 replies; 10+ messages in thread
From: Christoph Hellwig @ 2021-10-19 12:29 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, id Rientjes

Add a local variable to track if we want to remap the returned address
using vmap and use that to simplify the code flow.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 44 +++++++++++++++++++++++---------------------
 1 file changed, 23 insertions(+), 21 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 6673f7aebf787..41498243d8444 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -166,6 +166,7 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
+	bool remap = false;
 	struct page *page;
 	void *ret;
 	int err;
@@ -218,9 +219,23 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	     !dev_is_dma_coherent(dev)) ||
-	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+	if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
+		remap = true;
+	} else if (PageHighMem(page)) {
+		/*
+		 * Depending on the cma= arguments and per-arch setup,
+		 * dma_alloc_contiguous could return highmem pages.
+		 * Without remapping there is no way to return them here, so
+		 * log an error and fail.
+		 */
+		if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
+			dev_info(dev, "Rejecting highmem page from CMA.\n");
+			goto out_free_pages;
+		}
+		remap = true;
+	}
+
+	if (remap) {
 		/* remove any dirty cache lines on the kernel alias */
 		arch_dma_prep_coherent(page, size);
 
@@ -230,36 +245,23 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
-		err = dma_set_decrypted(dev, ret, size);
-		memset(ret, 0, size);
-		goto done;
+	} else {
+		ret = page_address(page);
 	}
 
-	if (PageHighMem(page)) {
-		/*
-		 * Depending on the cma= arguments and per-arch setup
-		 * dma_alloc_contiguous could return highmem pages.
-		 * Without remapping there is no way to return them here,
-		 * so log an error and fail.
-		 */
-		dev_info(dev, "Rejecting highmem page from CMA.\n");
-		goto out_free_pages;
-	}
-
-	ret = page_address(page);
 	err = dma_set_decrypted(dev, ret, size);
 	if (err)
 		goto out_free_pages;
 	memset(ret, 0, size);
 
-	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !dev_is_dma_coherent(dev)) {
+	if (!remap && !dev_is_dma_coherent(dev) &&
+	    IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED)) {
 		arch_dma_prep_coherent(page, size);
 		ret = arch_dma_set_uncached(ret, size);
 		if (IS_ERR(ret))
 			goto out_encrypt_pages;
 	}
-done:
+
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return ret;
 
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH 4/4] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations
  2021-10-19 12:29 dma-direct cleanups Christoph Hellwig
                   ` (2 preceding siblings ...)
  2021-10-19 12:29 ` [PATCH 3/4] dma-direct: clean up the remapping checks in dma_direct_alloc Christoph Hellwig
@ 2021-10-19 12:29 ` Christoph Hellwig
  2021-10-19 19:57   ` David Rientjes via iommu
  3 siblings, 1 reply; 10+ messages in thread
From: Christoph Hellwig @ 2021-10-19 12:29 UTC (permalink / raw)
  To: iommu; +Cc: Robin Murphy, id Rientjes

Split the code for DMA_ATTR_NO_KERNEL_MAPPING allocations into a separate
helper to make dma_direct_alloc a little more readable.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/direct.c | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 41498243d8444..21728c5f0ccd7 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -163,6 +163,24 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 	return ret;
 }
 
+static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp)
+{
+	struct page *page;
+
+	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+	if (!page)
+		return NULL;
+
+	/* remove any dirty cache lines on the kernel alias */
+	if (!PageHighMem(page))
+		arch_dma_prep_coherent(page, size);
+
+	/* return the page pointer as the opaque cookie */
+	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	return page;
+}
+
 void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
@@ -176,17 +194,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		gfp |= __GFP_NOWARN;
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
-		page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
-		if (!page)
-			return NULL;
-		/* remove any dirty cache lines on the kernel alias */
-		if (!PageHighMem(page))
-			arch_dma_prep_coherent(page, size);
-		*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
-		/* return the page pointer as the opaque cookie */
-		return page;
-	}
+	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
+		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH 1/4] dma-direct: factor out dma_set_{de,en}crypted helpers
  2021-10-19 12:29 ` [PATCH 1/4] dma-direct: factor out dma_set_{de,en}crypted helpers Christoph Hellwig
@ 2021-10-19 19:54   ` David Rientjes via iommu
  2021-10-21  6:59     ` Christoph Hellwig
  0 siblings, 1 reply; 10+ messages in thread
From: David Rientjes via iommu @ 2021-10-19 19:54 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: iommu, Robin Murphy

On Tue, 19 Oct 2021, Christoph Hellwig wrote:

> Factor out helpers that make dealing with memory encryption a little less
> cumbersome.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  kernel/dma/direct.c | 55 +++++++++++++++++++++------------------------
>  1 file changed, 25 insertions(+), 30 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 4c6c5e0635e34..96f02248e7fa2 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
>  		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
>  }
>  
> +static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
> +{
> +	if (!force_dma_unencrypted(dev))
> +		return 0;
> +	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
> +}
> +
> +static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
> +{
> +	if (!force_dma_unencrypted(dev))
> +		return 0;
> +	return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
> +}
> +
>  static void __dma_direct_free_pages(struct device *dev, struct page *page,
>  				    size_t size)
>  {
> @@ -216,12 +230,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>  				__builtin_return_address(0));
>  		if (!ret)
>  			goto out_free_pages;
> -		if (force_dma_unencrypted(dev)) {
> -			err = set_memory_decrypted((unsigned long)ret,
> -						   1 << get_order(size));
> -			if (err)
> -				goto out_free_pages;
> -		}
> +		err = dma_set_decrypted(dev, ret, size);

Should be

	if (dma_set_decrypted(dev, ret, size))
		goto out_free_pages;

?  Otherwise we ignore the value?

>  		memset(ret, 0, size);
>  		goto done;
>  	}
> @@ -238,13 +247,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>  	}
>  
>  	ret = page_address(page);
> -	if (force_dma_unencrypted(dev)) {
> -		err = set_memory_decrypted((unsigned long)ret,
> -					   1 << get_order(size));
> -		if (err)
> -			goto out_free_pages;
> -	}
> -
> +	err = dma_set_decrypted(dev, ret, size);
> +	if (err)
> +		goto out_free_pages;
>  	memset(ret, 0, size);
>  
>  	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
> @@ -259,13 +264,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>  	return ret;
>  
>  out_encrypt_pages:
> -	if (force_dma_unencrypted(dev)) {
> -		err = set_memory_encrypted((unsigned long)page_address(page),
> -					   1 << get_order(size));
> -		/* If memory cannot be re-encrypted, it must be leaked */
> -		if (err)
> -			return NULL;
> -	}
> +	/* If memory cannot be re-encrypted, it must be leaked */
> +	if (dma_set_encrypted(dev, page_address(page), size))
> +		return NULL;
>  out_free_pages:
>  	__dma_direct_free_pages(dev, page, size);
>  	return NULL;
> @@ -304,8 +305,7 @@ void dma_direct_free(struct device *dev, size_t size,
>  	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
>  		return;
>  
> -	if (force_dma_unencrypted(dev))
> -		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
> +	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
>  
>  	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
>  		vunmap(cpu_addr);
> @@ -341,11 +341,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
>  	}
>  
>  	ret = page_address(page);
> -	if (force_dma_unencrypted(dev)) {
> -		if (set_memory_decrypted((unsigned long)ret,
> -				1 << get_order(size)))
> -			goto out_free_pages;
> -	}
> +	if (dma_set_decrypted(dev, ret, size))
> +		goto out_free_pages;
>  	memset(ret, 0, size);
>  	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
>  	return page;
> @@ -366,9 +363,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
>  	    dma_free_from_pool(dev, vaddr, size))
>  		return;
>  
> -	if (force_dma_unencrypted(dev))
> -		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
> -
> +	dma_set_encrypted(dev, vaddr, 1 << page_order);
>  	__dma_direct_free_pages(dev, page, size);
>  }
>  
> -- 
> 2.30.2
> 
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 2/4] dma-direct: leak memory that can't be re-encrypted
  2021-10-19 12:29 ` [PATCH 2/4] dma-direct: leak memory that can't be re-encrypted Christoph Hellwig
@ 2021-10-19 19:56   ` David Rientjes via iommu
  2021-10-21  7:22     ` Christoph Hellwig
  0 siblings, 1 reply; 10+ messages in thread
From: David Rientjes via iommu @ 2021-10-19 19:56 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: iommu, Robin Murphy

On Tue, 19 Oct 2021, Christoph Hellwig wrote:

> We must never let unencrypted memory go back into the general page pool.
> So if we fail to set it back to encrypted when freeing DMA memory, leak
> the memory instead and warn the user.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  kernel/dma/direct.c | 17 +++++++++++++----
>  1 file changed, 13 insertions(+), 4 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 96f02248e7fa2..6673f7aebf787 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -264,9 +264,11 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>  	return ret;
>  
>  out_encrypt_pages:
> -	/* If memory cannot be re-encrypted, it must be leaked */
> -	if (dma_set_encrypted(dev, page_address(page), size))
> +	if (dma_set_encrypted(dev, page_address(page), size)) {
> +		pr_warn_ratelimited(
> +			"leaking DMA memory that can't be re-encrypted\n");
>  		return NULL;
> +	}
>  out_free_pages:
>  	__dma_direct_free_pages(dev, page, size);
>  	return NULL;
> @@ -305,7 +307,11 @@ void dma_direct_free(struct device *dev, size_t size,
>  	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
>  		return;
>  
> -	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
> +	if (dma_set_encrypted(dev, cpu_addr, 1 << page_order)) {
> +		pr_warn_ratelimited(
> +			"leaking DMA memory that can't be re-encrypted\n");
> +		return;
> +	}
>  
>  	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
>  		vunmap(cpu_addr);
> @@ -363,7 +369,10 @@ void dma_direct_free_pages(struct device *dev, size_t size,
>  	    dma_free_from_pool(dev, vaddr, size))
>  		return;
>  
> -	dma_set_encrypted(dev, vaddr, 1 << page_order);
> +	if (dma_set_encrypted(dev, vaddr, 1 << page_order)) {
> +		pr_warn_ratelimited(
> +			"leaking DMA memory that can't be re-encrypted\n");
> +	}

Is this actually leaking the memory?

>  	__dma_direct_free_pages(dev, page, size);
>  }
>  
> -- 
> 2.30.2
> 
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 4/4] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations
  2021-10-19 12:29 ` [PATCH 4/4] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations Christoph Hellwig
@ 2021-10-19 19:57   ` David Rientjes via iommu
  0 siblings, 0 replies; 10+ messages in thread
From: David Rientjes via iommu @ 2021-10-19 19:57 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: iommu, Robin Murphy

On Tue, 19 Oct 2021, Christoph Hellwig wrote:

> Split the code for DMA_ATTR_NO_KERNEL_MAPPING allocations into a separate
> helper to make dma_direct_alloc a little more readable.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Acked-by: David Rientjes <rientjes@google.com>

(I think my name got mangled in your To: field on the original emails :)
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 1/4] dma-direct: factor out dma_set_{de,en}crypted helpers
  2021-10-19 19:54   ` David Rientjes via iommu
@ 2021-10-21  6:59     ` Christoph Hellwig
  0 siblings, 0 replies; 10+ messages in thread
From: Christoph Hellwig @ 2021-10-21  6:59 UTC (permalink / raw)
  To: David Rientjes; +Cc: iommu, Robin Murphy, Christoph Hellwig

On Tue, Oct 19, 2021 at 12:54:54PM -0700, David Rientjes wrote:
> > -						   1 << get_order(size));
> > -			if (err)
> > -				goto out_free_pages;
> > -		}
> > +		err = dma_set_decrypted(dev, ret, size);
> 
> Should be
> 
> 	if (dma_set_decrypted(dev, ret, size))
> 		goto out_free_pages;
> 
> ?  Otherwise we ignore the value?

Yes.  Except that we also need to undo the remap as well.
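
Roughly something like this (untested sketch only; the out_unmap label
and its exact placement are illustrative, not the actual follow-up patch):

	err = dma_set_decrypted(dev, ret, size);
	if (err)
		goto out_unmap;
	...
out_unmap:
	/* undo the vmap()-based remapping before freeing the pages */
	if (remap)
		vunmap(ret);
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
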
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 2/4] dma-direct: leak memory that can't be re-encrypted
  2021-10-19 19:56   ` David Rientjes via iommu
@ 2021-10-21  7:22     ` Christoph Hellwig
  0 siblings, 0 replies; 10+ messages in thread
From: Christoph Hellwig @ 2021-10-21  7:22 UTC (permalink / raw)
  To: David Rientjes; +Cc: iommu, Robin Murphy, Christoph Hellwig

On Tue, Oct 19, 2021 at 12:56:36PM -0700, David Rientjes wrote:
> > -	dma_set_encrypted(dev, vaddr, 1 << page_order);
> > +	if (dma_set_encrypted(dev, vaddr, 1 << page_order)) {
> > +		pr_warn_ratelimited(
> > +			"leaking DMA memory that can't be re-encrypted\n");
> > +	}
> 
> Is this actually leaking the memory?

Yes, it should return here.
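
I.e. roughly (sketch, mirroring the dma_direct_free() hunk above):

	if (dma_set_encrypted(dev, vaddr, 1 << page_order)) {
		pr_warn_ratelimited(
			"leaking DMA memory that can't be re-encrypted\n");
		return;
	}
	__dma_direct_free_pages(dev, page, size);
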
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2021-10-21  7:22 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-10-19 12:29 dma-direct cleanups Christoph Hellwig
2021-10-19 12:29 ` [PATCH 1/4] dma-direct: factor out dma_set_{de,en}crypted helpers Christoph Hellwig
2021-10-19 19:54   ` David Rientjes via iommu
2021-10-21  6:59     ` Christoph Hellwig
2021-10-19 12:29 ` [PATCH 2/4] dma-direct: leak memory that can't be re-encrypted Christoph Hellwig
2021-10-19 19:56   ` David Rientjes via iommu
2021-10-21  7:22     ` Christoph Hellwig
2021-10-19 12:29 ` [PATCH 3/4] dma-direct: clean up the remapping checks in dma_direct_alloc Christoph Hellwig
2021-10-19 12:29 ` [PATCH 4/4] dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations Christoph Hellwig
2021-10-19 19:57   ` David Rientjes via iommu
