iommu.lists.linux-foundation.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks
@ 2019-07-10 19:01 Lendacky, Thomas
  2019-07-11 10:05 ` Christoph Hellwig
                   ` (2 more replies)
  0 siblings, 3 replies; 12+ messages in thread
From: Lendacky, Thomas @ 2019-07-10 19:01 UTC (permalink / raw)
  To: iommu, linux-kernel, x86
  Cc: Lianbo Jiang, Peter Zijlstra, Dave Hansen, Ingo Molnar,
	Borislav Petkov, Andy Lutomirski, H . Peter Anvin,
	Thomas Gleixner, Robin Murphy, Christoph Hellwig

From: Tom Lendacky <thomas.lendacky@amd.com>

If a device doesn't support DMA to a physical address that includes the
encryption bit (currently bit 47, so 48-bit DMA), then the DMA must
occur to unencrypted memory. SWIOTLB is used to satisfy that requirement
if an IOMMU is not active (enabled or configured in passthrough mode).

However, commit fafadcd16595 ("swiotlb: don't dip into swiotlb pool for
coherent allocations") modified the coherent allocation support in SWIOTLB
to use the DMA direct coherent allocation support. When an IOMMU is not
active, this resulted in dma_alloc_coherent() failing for devices that
didn't support DMA addresses that included the encryption bit.

Addressing this requires changes to the force_dma_unencrypted() function
in kernel/dma/direct.c. Since the function is now non-trivial and SME/SEV
specific, update the DMA direct support to add an arch override for the
force_dma_unencrypted() function. The arch override is selected when
CONFIG_AMD_MEM_ENCRYPT is set. The arch override function resides in the
arch/x86/mm/mem_encrypt.c file and forces unencrypted DMA when either SEV
is active or SME is active and the device does not support DMA to physical
addresses that include the encryption bit.

Fixes: fafadcd16595 ("swiotlb: don't dip into swiotlb pool for coherent allocations")
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---

Based on tree git://git.infradead.org/users/hch/dma-mapping.git for-next

 arch/x86/Kconfig          |  1 +
 arch/x86/mm/mem_encrypt.c | 30 ++++++++++++++++++++++++++++++
 kernel/dma/Kconfig        |  3 +++
 kernel/dma/direct.c       | 19 ++++++++++---------
 4 files changed, 44 insertions(+), 9 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2bbbd4d1ba31..12e02a8f9de7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1508,6 +1508,7 @@ config AMD_MEM_ENCRYPT
 	depends on X86_64 && CPU_SUP_AMD
 	select DYNAMIC_PHYSICAL_MASK
 	select ARCH_USE_MEMREMAP_PROT
+	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	---help---
 	  Say yes to enable support for the encryption of system memory.
 	  This requires an AMD processor that supports Secure Memory
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 51f50a7a07ef..c7a88b837c43 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -18,6 +18,10 @@
 #include <linux/dma-direct.h>
 #include <linux/swiotlb.h>
 #include <linux/mem_encrypt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -351,6 +355,32 @@ bool sev_active(void)
 }
 EXPORT_SYMBOL(sev_active);
 
+/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+bool force_dma_unencrypted(struct device *dev)
+{
+	/*
+	 * For SEV, all DMA must be to unencrypted addresses.
+	 */
+	if (sev_active())
+		return true;
+
+	/*
+	 * For SME, all DMA must be to unencrypted addresses if the
+	 * device does not support DMA to addresses that include the
+	 * encryption mask.
+	 */
+	if (sme_active()) {
+		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
+		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
+						dev->bus_dma_mask);
+
+		if (dma_dev_mask <= dma_enc_mask)
+			return true;
+	}
+
+	return false;
+}
+
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_free_decrypted_mem(void)
 {
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 70f8f8d9200e..9decbba255fc 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -48,6 +48,9 @@ config ARCH_HAS_DMA_COHERENT_TO_PFN
 config ARCH_HAS_DMA_MMAP_PGPROT
 	bool
 
+config ARCH_HAS_FORCE_DMA_UNENCRYPTED
+	bool
+
 config DMA_NONCOHERENT_CACHE_SYNC
 	bool
 
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index b90e1aede743..fb37374dae75 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -23,13 +23,14 @@
 #define ARCH_ZONE_DMA_BITS 24
 #endif
 
-/*
- * For AMD SEV all DMA must be to unencrypted addresses.
- */
-static inline bool force_dma_unencrypted(void)
+#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
+bool force_dma_unencrypted(struct device *dev);
+#else
+static inline bool force_dma_unencrypted(struct device *dev)
 {
-	return sev_active();
+	return false;
 }
+#endif
 
 static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
@@ -46,7 +47,7 @@ static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
 static inline dma_addr_t phys_to_dma_direct(struct device *dev,
 		phys_addr_t phys)
 {
-	if (force_dma_unencrypted())
+	if (force_dma_unencrypted(dev))
 		return __phys_to_dma(dev, phys);
 	return phys_to_dma(dev, phys);
 }
@@ -67,7 +68,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
 		dma_mask = dev->bus_dma_mask;
 
-	if (force_dma_unencrypted())
+	if (force_dma_unencrypted(dev))
 		*phys_mask = __dma_to_phys(dev, dma_mask);
 	else
 		*phys_mask = dma_to_phys(dev, dma_mask);
@@ -159,7 +160,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted()) {
+	if (force_dma_unencrypted(dev)) {
 		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
 		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
 	} else {
@@ -192,7 +193,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
-	if (force_dma_unencrypted())
+	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
-- 
2.17.1

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks
  2019-07-10 19:01 [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks Lendacky, Thomas
@ 2019-07-11 10:05 ` Christoph Hellwig
  2019-07-11 12:18   ` Thomas Gleixner
  2019-07-11 12:16 ` Thomas Gleixner
  2019-07-24 15:55 ` Kirill A. Shutemov
  2 siblings, 1 reply; 12+ messages in thread
From: Christoph Hellwig @ 2019-07-11 10:05 UTC (permalink / raw)
  To: Lendacky, Thomas
  Cc: Dave Hansen, Lianbo Jiang, Peter Zijlstra, x86, linux-kernel,
	iommu, Ingo Molnar, Borislav Petkov, Andy Lutomirski,
	H . Peter Anvin, Thomas Gleixner, Robin Murphy,
	Christoph Hellwig

Hi Tom,

this looks good to me.  I'll wait a bit for feedback from the x86 folks,
and if everything is fine I'll apply it to the dma-mapping tree.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks
  2019-07-10 19:01 [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks Lendacky, Thomas
  2019-07-11 10:05 ` Christoph Hellwig
@ 2019-07-11 12:16 ` Thomas Gleixner
  2019-07-24 15:55 ` Kirill A. Shutemov
  2 siblings, 0 replies; 12+ messages in thread
From: Thomas Gleixner @ 2019-07-11 12:16 UTC (permalink / raw)
  To: Lendacky, Thomas
  Cc: Dave Hansen, Lianbo Jiang, Peter Zijlstra, x86, linux-kernel,
	iommu, Ingo Molnar, Borislav Petkov, Andy Lutomirski,
	H . Peter Anvin, Robin Murphy, Christoph Hellwig

On Wed, 10 Jul 2019, Lendacky, Thomas wrote:

> From: Tom Lendacky <thomas.lendacky@amd.com>
> 
> If a device doesn't support DMA to a physical address that includes the
> encryption bit (currently bit 47, so 48-bit DMA), then the DMA must
> occur to unencrypted memory. SWIOTLB is used to satisfy that requirement
> if an IOMMU is not active (enabled or configured in passthrough mode).
> 
> However, commit fafadcd16595 ("swiotlb: don't dip into swiotlb pool for
> coherent allocations") modified the coherent allocation support in SWIOTLB
> to use the DMA direct coherent allocation support. When an IOMMU is not
> active, this resulted in dma_alloc_coherent() failing for devices that
> didn't support DMA addresses that included the encryption bit.
> 
> Addressing this requires changes to the force_dma_unencrypted() function
> in kernel/dma/direct.c. Since the function is now non-trivial and SME/SEV
> specific, update the DMA direct support to add an arch override for the
> force_dma_unencrypted() function. The arch override is selected when
> CONFIG_AMD_MEM_ENCRYPT is set. The arch override function resides in the
> arch/x86/mm/mem_encrypt.c file and forces unencrypted DMA when either SEV
> is active or SME is active and the device does not support DMA to physical
> addresses that include the encryption bit.
> 
> Fixes: fafadcd16595 ("swiotlb: don't dip into swiotlb pool for coherent allocations")
> Suggested-by: Christoph Hellwig <hch@lst.de>
> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
> ---
> 
> Based on tree git://git.infradead.org/users/hch/dma-mapping.git for-next
> 
>  arch/x86/Kconfig          |  1 +

For the x86 parts:

Acked-by: Thomas Gleixner <tglx@linutronix.de>
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks
  2019-07-11 10:05 ` Christoph Hellwig
@ 2019-07-11 12:18   ` Thomas Gleixner
  0 siblings, 0 replies; 12+ messages in thread
From: Thomas Gleixner @ 2019-07-11 12:18 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Lendacky, Thomas, Dave Hansen, Lianbo Jiang, Peter Zijlstra, x86,
	linux-kernel, iommu, Ingo Molnar, Borislav Petkov,
	Andy Lutomirski, H . Peter Anvin, Robin Murphy

On Thu, 11 Jul 2019, Christoph Hellwig wrote:

> Hi Tom,
> 
> this looks good to me.  I'll wait a bit for feedback from the x86 folks,
> and if everything is fine I'll apply it to the dma-mapping tree.

Go ahead.

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks
  2019-07-10 19:01 [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks Lendacky, Thomas
  2019-07-11 10:05 ` Christoph Hellwig
  2019-07-11 12:16 ` Thomas Gleixner
@ 2019-07-24 15:55 ` Kirill A. Shutemov
  2019-07-24 16:42   ` Lendacky, Thomas
  2 siblings, 1 reply; 12+ messages in thread
From: Kirill A. Shutemov @ 2019-07-24 15:55 UTC (permalink / raw)
  To: Lendacky, Thomas
  Cc: Dave Hansen, Lianbo Jiang, Peter Zijlstra, x86, linux-kernel,
	iommu, Ingo Molnar, Borislav Petkov, Andy Lutomirski,
	H . Peter Anvin, Thomas Gleixner, Robin Murphy,
	Christoph Hellwig

On Wed, Jul 10, 2019 at 07:01:19PM +0000, Lendacky, Thomas wrote:
> @@ -351,6 +355,32 @@ bool sev_active(void)
>  }
>  EXPORT_SYMBOL(sev_active);
>  
> +/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
> +bool force_dma_unencrypted(struct device *dev)
> +{
> +	/*
> +	 * For SEV, all DMA must be to unencrypted addresses.
> +	 */
> +	if (sev_active())
> +		return true;
> +
> +	/*
> +	 * For SME, all DMA must be to unencrypted addresses if the
> +	 * device does not support DMA to addresses that include the
> +	 * encryption mask.
> +	 */
> +	if (sme_active()) {
> +		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
> +		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
> +						dev->bus_dma_mask);
> +
> +		if (dma_dev_mask <= dma_enc_mask)
> +			return true;

Hm. What is wrong with the dev mask being equal to enc mask? IIUC, it
means that device mask is wide enough to cover encryption bit, doesn't it?

> +	}
> +
> +	return false;
> +}

-- 
 Kirill A. Shutemov
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks
  2019-07-24 15:55 ` Kirill A. Shutemov
@ 2019-07-24 16:42   ` Lendacky, Thomas
  2019-07-24 17:06     ` Robin Murphy
  0 siblings, 1 reply; 12+ messages in thread
From: Lendacky, Thomas @ 2019-07-24 16:42 UTC (permalink / raw)
  To: Kirill A. Shutemov
  Cc: Dave Hansen, Lianbo Jiang, Peter Zijlstra, x86, linux-kernel,
	iommu, Ingo Molnar, Borislav Petkov, Andy Lutomirski,
	H . Peter Anvin, Thomas Gleixner, Robin Murphy,
	Christoph Hellwig

On 7/24/19 10:55 AM, Kirill A. Shutemov wrote:
> On Wed, Jul 10, 2019 at 07:01:19PM +0000, Lendacky, Thomas wrote:
>> @@ -351,6 +355,32 @@ bool sev_active(void)
>>  }
>>  EXPORT_SYMBOL(sev_active);
>>  
>> +/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
>> +bool force_dma_unencrypted(struct device *dev)
>> +{
>> +	/*
>> +	 * For SEV, all DMA must be to unencrypted addresses.
>> +	 */
>> +	if (sev_active())
>> +		return true;
>> +
>> +	/*
>> +	 * For SME, all DMA must be to unencrypted addresses if the
>> +	 * device does not support DMA to addresses that include the
>> +	 * encryption mask.
>> +	 */
>> +	if (sme_active()) {
>> +		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
>> +		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
>> +						dev->bus_dma_mask);
>> +
>> +		if (dma_dev_mask <= dma_enc_mask)
>> +			return true;
> 
> Hm. What is wrong with the dev mask being equal to enc mask? IIUC, it
> means that device mask is wide enough to cover encryption bit, doesn't it?

Not really...  it's the way DMA_BIT_MASK works vs bit numbering. Let's say
that sme_me_mask has bit 47 set. __ffs64 returns 47 and DMA_BIT_MASK(47)
will generate a mask without bit 47 set (0x7fffffffffff). So the check
will catch anything that does not support at least 48-bit DMA.

Thanks,
Tom

> 
>> +	}
>> +
>> +	return false;
>> +}
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks
  2019-07-24 16:42   ` Lendacky, Thomas
@ 2019-07-24 17:06     ` Robin Murphy
  2019-07-24 17:34       ` Lendacky, Thomas
  0 siblings, 1 reply; 12+ messages in thread
From: Robin Murphy @ 2019-07-24 17:06 UTC (permalink / raw)
  To: Lendacky, Thomas, Kirill A. Shutemov
  Cc: Dave Hansen, Lianbo Jiang, Peter Zijlstra, x86, linux-kernel,
	iommu, Ingo Molnar, Borislav Petkov, Andy Lutomirski,
	H . Peter Anvin, Thomas Gleixner, Christoph Hellwig

On 24/07/2019 17:42, Lendacky, Thomas wrote:
> On 7/24/19 10:55 AM, Kirill A. Shutemov wrote:
>> On Wed, Jul 10, 2019 at 07:01:19PM +0000, Lendacky, Thomas wrote:
>>> @@ -351,6 +355,32 @@ bool sev_active(void)
>>>   }
>>>   EXPORT_SYMBOL(sev_active);
>>>   
>>> +/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
>>> +bool force_dma_unencrypted(struct device *dev)
>>> +{
>>> +	/*
>>> +	 * For SEV, all DMA must be to unencrypted addresses.
>>> +	 */
>>> +	if (sev_active())
>>> +		return true;
>>> +
>>> +	/*
>>> +	 * For SME, all DMA must be to unencrypted addresses if the
>>> +	 * device does not support DMA to addresses that include the
>>> +	 * encryption mask.
>>> +	 */
>>> +	if (sme_active()) {
>>> +		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
>>> +		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
>>> +						dev->bus_dma_mask);
>>> +
>>> +		if (dma_dev_mask <= dma_enc_mask)
>>> +			return true;
>>
>> Hm. What is wrong with the dev mask being equal to enc mask? IIUC, it
>> means that device mask is wide enough to cover encryption bit, doesn't it?
> 
> Not really...  it's the way DMA_BIT_MASK works vs bit numbering. Let's say
> that sme_me_mask has bit 47 set. __ffs64 returns 47 and DMA_BIT_MASK(47)
> will generate a mask without bit 47 set (0x7fffffffffff). So the check
> will catch anything that does not support at least 48-bit DMA.

Couldn't that be expressed as just:

	if (sme_me_mask & dma_dev_mask == sme_me_mask)

?

Robin.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks
  2019-07-24 17:06     ` Robin Murphy
@ 2019-07-24 17:34       ` Lendacky, Thomas
  2019-07-24 18:11         ` Kirill A. Shutemov
  0 siblings, 1 reply; 12+ messages in thread
From: Lendacky, Thomas @ 2019-07-24 17:34 UTC (permalink / raw)
  To: Robin Murphy, Kirill A. Shutemov
  Cc: Dave Hansen, Lianbo Jiang, Peter Zijlstra, x86, linux-kernel,
	iommu, Ingo Molnar, Borislav Petkov, Andy Lutomirski,
	H . Peter Anvin, Thomas Gleixner, Christoph Hellwig

On 7/24/19 12:06 PM, Robin Murphy wrote:
> On 24/07/2019 17:42, Lendacky, Thomas wrote:
>> On 7/24/19 10:55 AM, Kirill A. Shutemov wrote:
>>> On Wed, Jul 10, 2019 at 07:01:19PM +0000, Lendacky, Thomas wrote:
>>>> @@ -351,6 +355,32 @@ bool sev_active(void)
>>>>   }
>>>>   EXPORT_SYMBOL(sev_active);
>>>>   +/* Override for DMA direct allocation check -
>>>> ARCH_HAS_FORCE_DMA_UNENCRYPTED */
>>>> +bool force_dma_unencrypted(struct device *dev)
>>>> +{
>>>> +    /*
>>>> +     * For SEV, all DMA must be to unencrypted addresses.
>>>> +     */
>>>> +    if (sev_active())
>>>> +        return true;
>>>> +
>>>> +    /*
>>>> +     * For SME, all DMA must be to unencrypted addresses if the
>>>> +     * device does not support DMA to addresses that include the
>>>> +     * encryption mask.
>>>> +     */
>>>> +    if (sme_active()) {
>>>> +        u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
>>>> +        u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
>>>> +                        dev->bus_dma_mask);
>>>> +
>>>> +        if (dma_dev_mask <= dma_enc_mask)
>>>> +            return true;
>>>
>>> Hm. What is wrong with the dev mask being equal to enc mask? IIUC, it
>>> means that device mask is wide enough to cover encryption bit, doesn't it?
>>
>> Not really...  it's the way DMA_BIT_MASK works vs bit numbering. Let's say
>> that sme_me_mask has bit 47 set. __ffs64 returns 47 and DMA_BIT_MASK(47)
>> will generate a mask without bit 47 set (0x7fffffffffff). So the check
>> will catch anything that does not support at least 48-bit DMA.
> 
> Couldn't that be expressed as just:
> 
>     if (sme_me_mask & dma_dev_mask == sme_me_mask)

Actually !=, but yes, it could have been done like that, I just didn't
think of it.

Thanks,
Tom

> 
> ?
> 
> Robin.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks
  2019-07-24 17:34       ` Lendacky, Thomas
@ 2019-07-24 18:11         ` Kirill A. Shutemov
  2019-07-24 18:30           ` Lendacky, Thomas
  0 siblings, 1 reply; 12+ messages in thread
From: Kirill A. Shutemov @ 2019-07-24 18:11 UTC (permalink / raw)
  To: Lendacky, Thomas
  Cc: Dave Hansen, Lianbo Jiang, Peter Zijlstra, x86, linux-kernel,
	iommu, Ingo Molnar, Borislav Petkov, Andy Lutomirski,
	H . Peter Anvin, Thomas Gleixner, Robin Murphy,
	Christoph Hellwig

On Wed, Jul 24, 2019 at 05:34:26PM +0000, Lendacky, Thomas wrote:
> On 7/24/19 12:06 PM, Robin Murphy wrote:
> > On 24/07/2019 17:42, Lendacky, Thomas wrote:
> >> On 7/24/19 10:55 AM, Kirill A. Shutemov wrote:
> >>> On Wed, Jul 10, 2019 at 07:01:19PM +0000, Lendacky, Thomas wrote:
> >>>> @@ -351,6 +355,32 @@ bool sev_active(void)
> >>>>   }
> >>>>   EXPORT_SYMBOL(sev_active);
> >>>>   +/* Override for DMA direct allocation check -
> >>>> ARCH_HAS_FORCE_DMA_UNENCRYPTED */
> >>>> +bool force_dma_unencrypted(struct device *dev)
> >>>> +{
> >>>> +    /*
> >>>> +     * For SEV, all DMA must be to unencrypted addresses.
> >>>> +     */
> >>>> +    if (sev_active())
> >>>> +        return true;
> >>>> +
> >>>> +    /*
> >>>> +     * For SME, all DMA must be to unencrypted addresses if the
> >>>> +     * device does not support DMA to addresses that include the
> >>>> +     * encryption mask.
> >>>> +     */
> >>>> +    if (sme_active()) {
> >>>> +        u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
> >>>> +        u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
> >>>> +                        dev->bus_dma_mask);
> >>>> +
> >>>> +        if (dma_dev_mask <= dma_enc_mask)
> >>>> +            return true;
> >>>
> >>> Hm. What is wrong with the dev mask being equal to enc mask? IIUC, it
> >>> means that device mask is wide enough to cover encryption bit, doesn't it?
> >>
> >> Not really...  it's the way DMA_BIT_MASK works vs bit numbering. Let's say
> >> that sme_me_mask has bit 47 set. __ffs64 returns 47 and DMA_BIT_MASK(47)
> >> will generate a mask without bit 47 set (0x7fffffffffff). So the check
> >> will catch anything that does not support at least 48-bit DMA.
> > 
> > Couldn't that be expressed as just:
> > 
> >     if (sme_me_mask & dma_dev_mask == sme_me_mask)
> 
> Actually !=, but yes, it could have been done like that, I just didn't
> think of it.

I'm looking into generalizing the check to cover MKTME.

Leaving off the Kconfig changes and moving the check to another file, does
the change below look reasonable to you? It's only build tested so far.

diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index fece30ca8b0c..6c86adcd02da 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -355,6 +355,8 @@ EXPORT_SYMBOL(sev_active);
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
 {
+	u64 dma_enc_mask;
+
 	/*
 	 * For SEV, all DMA must be to unencrypted addresses.
 	 */
@@ -362,18 +364,20 @@ bool force_dma_unencrypted(struct device *dev)
 		return true;
 
 	/*
-	 * For SME, all DMA must be to unencrypted addresses if the
-	 * device does not support DMA to addresses that include the
-	 * encryption mask.
+	 * For SME and MKTME, all DMA must be to unencrypted addresses if the
+	 * device does not support DMA to addresses that include the encryption
+	 * mask.
 	 */
-	if (sme_active()) {
-		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
-		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
-						dev->bus_dma_mask);
+	if (!sme_active() && !mktme_enabled())
+		return false;
 
-		if (dma_dev_mask <= dma_enc_mask)
-			return true;
-	}
+	dma_enc_mask = sme_me_mask | mktme_keyid_mask();
+
+	if (dev->coherent_dma_mask && (dev->coherent_dma_mask & dma_enc_mask) != dma_enc_mask)
+		return true;
+
+	if (dev->bus_dma_mask && (dev->bus_dma_mask & dma_enc_mask) != dma_enc_mask)
+		return true;
 
 	return false;
 }
-- 
 Kirill A. Shutemov
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks
  2019-07-24 18:11         ` Kirill A. Shutemov
@ 2019-07-24 18:30           ` Lendacky, Thomas
  2019-07-24 18:40             ` Kirill A. Shutemov
  0 siblings, 1 reply; 12+ messages in thread
From: Lendacky, Thomas @ 2019-07-24 18:30 UTC (permalink / raw)
  To: Kirill A. Shutemov
  Cc: Dave Hansen, Lianbo Jiang, Peter Zijlstra, x86, linux-kernel,
	iommu, Ingo Molnar, Borislav Petkov, Andy Lutomirski,
	H . Peter Anvin, Thomas Gleixner, Robin Murphy,
	Christoph Hellwig

On 7/24/19 1:11 PM, Kirill A. Shutemov wrote:
> On Wed, Jul 24, 2019 at 05:34:26PM +0000, Lendacky, Thomas wrote:
>> On 7/24/19 12:06 PM, Robin Murphy wrote:
>>> On 24/07/2019 17:42, Lendacky, Thomas wrote:
>>>> On 7/24/19 10:55 AM, Kirill A. Shutemov wrote:
>>>>> On Wed, Jul 10, 2019 at 07:01:19PM +0000, Lendacky, Thomas wrote:
>>>>>> @@ -351,6 +355,32 @@ bool sev_active(void)
>>>>>>   }
>>>>>>   EXPORT_SYMBOL(sev_active);
>>>>>>   +/* Override for DMA direct allocation check -
>>>>>> ARCH_HAS_FORCE_DMA_UNENCRYPTED */
>>>>>> +bool force_dma_unencrypted(struct device *dev)
>>>>>> +{
>>>>>> +    /*
>>>>>> +     * For SEV, all DMA must be to unencrypted addresses.
>>>>>> +     */
>>>>>> +    if (sev_active())
>>>>>> +        return true;
>>>>>> +
>>>>>> +    /*
>>>>>> +     * For SME, all DMA must be to unencrypted addresses if the
>>>>>> +     * device does not support DMA to addresses that include the
>>>>>> +     * encryption mask.
>>>>>> +     */
>>>>>> +    if (sme_active()) {
>>>>>> +        u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
>>>>>> +        u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
>>>>>> +                        dev->bus_dma_mask);
>>>>>> +
>>>>>> +        if (dma_dev_mask <= dma_enc_mask)
>>>>>> +            return true;
>>>>>
>>>>> Hm. What is wrong with the dev mask being equal to enc mask? IIUC, it
>>>>> means that device mask is wide enough to cover encryption bit, doesn't it?
>>>>
>>>> Not really...  it's the way DMA_BIT_MASK works vs bit numbering. Let's say
>>>> that sme_me_mask has bit 47 set. __ffs64 returns 47 and DMA_BIT_MASK(47)
>>>> will generate a mask without bit 47 set (0x7fffffffffff). So the check
>>>> will catch anything that does not support at least 48-bit DMA.
>>>
>>> Couldn't that be expressed as just:
>>>
>>>     if (sme_me_mask & dma_dev_mask == sme_me_mask)
>>
>> Actually !=, but yes, it could have been done like that, I just didn't
>> think of it.
> 
> I'm looking into generalizing the check to cover MKTME.
> 
> Leaving off the Kconfig changes and moving the check to another file, does
> the change below look reasonable to you? It's only build tested so far.
> 
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index fece30ca8b0c..6c86adcd02da 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -355,6 +355,8 @@ EXPORT_SYMBOL(sev_active);
>  /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
>  bool force_dma_unencrypted(struct device *dev)
>  {
> +	u64 dma_enc_mask;
> +
>  	/*
>  	 * For SEV, all DMA must be to unencrypted addresses.
>  	 */
> @@ -362,18 +364,20 @@ bool force_dma_unencrypted(struct device *dev)
>  		return true;
>  
>  	/*
> -	 * For SME, all DMA must be to unencrypted addresses if the
> -	 * device does not support DMA to addresses that include the
> -	 * encryption mask.
> +	 * For SME and MKTME, all DMA must be to unencrypted addresses if the
> +	 * device does not support DMA to addresses that include the encryption
> +	 * mask.
>  	 */
> -	if (sme_active()) {
> -		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
> -		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
> -						dev->bus_dma_mask);
> +	if (!sme_active() && !mktme_enabled())
> +		return false;
>  
> -		if (dma_dev_mask <= dma_enc_mask)
> -			return true;
> -	}
> +	dma_enc_mask = sme_me_mask | mktme_keyid_mask();
> +
> +	if (dev->coherent_dma_mask && (dev->coherent_dma_mask & dma_enc_mask) != dma_enc_mask)
> +		return true;
> +
> +	if (dev->bus_dma_mask && (dev->bus_dma_mask & dma_enc_mask) != dma_enc_mask)
> +		return true;

Do you want to err on the side of caution and return true if both masks
are zero? You could do the min_not_zero step and then return true if the
result is zero. Then just make the one comparison against dma_enc_mask.

Thanks,
Tom

>  
>  	return false;
>  }
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks
  2019-07-24 18:30           ` Lendacky, Thomas
@ 2019-07-24 18:40             ` Kirill A. Shutemov
  2019-07-24 18:49               ` Lendacky, Thomas
  0 siblings, 1 reply; 12+ messages in thread
From: Kirill A. Shutemov @ 2019-07-24 18:40 UTC (permalink / raw)
  To: Lendacky, Thomas
  Cc: Dave Hansen, Lianbo Jiang, Peter Zijlstra, x86, linux-kernel,
	iommu, Ingo Molnar, Borislav Petkov, Andy Lutomirski,
	H . Peter Anvin, Thomas Gleixner, Robin Murphy,
	Christoph Hellwig

On Wed, Jul 24, 2019 at 06:30:21PM +0000, Lendacky, Thomas wrote:
> On 7/24/19 1:11 PM, Kirill A. Shutemov wrote:
> > On Wed, Jul 24, 2019 at 05:34:26PM +0000, Lendacky, Thomas wrote:
> >> On 7/24/19 12:06 PM, Robin Murphy wrote:
> >>> On 24/07/2019 17:42, Lendacky, Thomas wrote:
> >>>> On 7/24/19 10:55 AM, Kirill A. Shutemov wrote:
> >>>>> On Wed, Jul 10, 2019 at 07:01:19PM +0000, Lendacky, Thomas wrote:
> >>>>>> @@ -351,6 +355,32 @@ bool sev_active(void)
> >>>>>>   }
> >>>>>>   EXPORT_SYMBOL(sev_active);
> >>>>>>   +/* Override for DMA direct allocation check -
> >>>>>> ARCH_HAS_FORCE_DMA_UNENCRYPTED */
> >>>>>> +bool force_dma_unencrypted(struct device *dev)
> >>>>>> +{
> >>>>>> +    /*
> >>>>>> +     * For SEV, all DMA must be to unencrypted addresses.
> >>>>>> +     */
> >>>>>> +    if (sev_active())
> >>>>>> +        return true;
> >>>>>> +
> >>>>>> +    /*
> >>>>>> +     * For SME, all DMA must be to unencrypted addresses if the
> >>>>>> +     * device does not support DMA to addresses that include the
> >>>>>> +     * encryption mask.
> >>>>>> +     */
> >>>>>> +    if (sme_active()) {
> >>>>>> +        u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
> >>>>>> +        u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
> >>>>>> +                        dev->bus_dma_mask);
> >>>>>> +
> >>>>>> +        if (dma_dev_mask <= dma_enc_mask)
> >>>>>> +            return true;
> >>>>>
> >>>>> Hm. What is wrong with the dev mask being equal to enc mask? IIUC, it
> >>>>> means that device mask is wide enough to cover encryption bit, doesn't it?
> >>>>
> >>>> Not really...  it's the way DMA_BIT_MASK works vs bit numbering. Let's say
> >>>> that sme_me_mask has bit 47 set. __ffs64 returns 47 and DMA_BIT_MASK(47)
> >>>> will generate a mask without bit 47 set (0x7fffffffffff). So the check
> >>>> will catch anything that does not support at least 48-bit DMA.
> >>>
> >>> Couldn't that be expressed as just:
> >>>
> >>>     if (sme_me_mask & dma_dev_mask == sme_me_mask)
> >>
> >> Actually !=, but yes, it could have been done like that, I just didn't
> >> think of it.
> > 
> > I'm looking into generalizing the check to cover MKTME.
> > 
> > Leaving off the Kconfig changes and moving the check to another file, does
> > the change below look reasonable to you? It's only build tested so far.
> > 
> > diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> > index fece30ca8b0c..6c86adcd02da 100644
> > --- a/arch/x86/mm/mem_encrypt.c
> > +++ b/arch/x86/mm/mem_encrypt.c
> > @@ -355,6 +355,8 @@ EXPORT_SYMBOL(sev_active);
> >  /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
> >  bool force_dma_unencrypted(struct device *dev)
> >  {
> > +	u64 dma_enc_mask;
> > +
> >  	/*
> >  	 * For SEV, all DMA must be to unencrypted addresses.
> >  	 */
> > @@ -362,18 +364,20 @@ bool force_dma_unencrypted(struct device *dev)
> >  		return true;
> >  
> >  	/*
> > -	 * For SME, all DMA must be to unencrypted addresses if the
> > -	 * device does not support DMA to addresses that include the
> > -	 * encryption mask.
> > +	 * For SME and MKTME, all DMA must be to unencrypted addresses if the
> > +	 * device does not support DMA to addresses that include the encryption
> > +	 * mask.
> >  	 */
> > -	if (sme_active()) {
> > -		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
> > -		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
> > -						dev->bus_dma_mask);
> > +	if (!sme_active() && !mktme_enabled())
> > +		return false;
> >  
> > -		if (dma_dev_mask <= dma_enc_mask)
> > -			return true;
> > -	}
> > +	dma_enc_mask = sme_me_mask | mktme_keyid_mask();
> > +
> > +	if (dev->coherent_dma_mask && (dev->coherent_dma_mask & dma_enc_mask) != dma_enc_mask)
> > +		return true;
> > +
> > +	if (dev->bus_dma_mask && (dev->bus_dma_mask & dma_enc_mask) != dma_enc_mask)
> > +		return true;
> 
> Do you want to err on the side of caution and return true if both masks
> are zero? You could do the min_not_zero step and then return true if the
> result is zero. Then just make the one comparison against dma_enc_mask.

Something like this?

diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index fece30ca8b0c..173d68b08c55 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -355,6 +355,8 @@ EXPORT_SYMBOL(sev_active);
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
 {
+	u64 dma_enc_mask, dma_dev_mask;
+
 	/*
 	 * For SEV, all DMA must be to unencrypted addresses.
 	 */
@@ -362,20 +364,17 @@ bool force_dma_unencrypted(struct device *dev)
 		return true;
 
 	/*
-	 * For SME, all DMA must be to unencrypted addresses if the
-	 * device does not support DMA to addresses that include the
-	 * encryption mask.
+	 * For SME and MKTME, all DMA must be to unencrypted addresses if the
+	 * device does not support DMA to addresses that include the encryption
+	 * mask.
 	 */
-	if (sme_active()) {
-		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
-		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
-						dev->bus_dma_mask);
+	if (!sme_active() && !mktme_enabled())
+		return false;
 
-		if (dma_dev_mask <= dma_enc_mask)
-			return true;
-	}
+	dma_enc_mask = sme_me_mask | mktme_keyid_mask();
+	dma_dev_mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
 
-	return false;
+	return (dma_dev_mask & dma_enc_mask) != dma_enc_mask;
 }
 
 /* Architecture __weak replacement functions */
-- 
 Kirill A. Shutemov
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks
  2019-07-24 18:40             ` Kirill A. Shutemov
@ 2019-07-24 18:49               ` Lendacky, Thomas
  0 siblings, 0 replies; 12+ messages in thread
From: Lendacky, Thomas @ 2019-07-24 18:49 UTC (permalink / raw)
  To: Kirill A. Shutemov
  Cc: Dave Hansen, Lianbo Jiang, Peter Zijlstra, x86, linux-kernel,
	iommu, Ingo Molnar, Borislav Petkov, Andy Lutomirski,
	H . Peter Anvin, Thomas Gleixner, Robin Murphy,
	Christoph Hellwig

On 7/24/19 1:40 PM, Kirill A. Shutemov wrote:
> On Wed, Jul 24, 2019 at 06:30:21PM +0000, Lendacky, Thomas wrote:
>> On 7/24/19 1:11 PM, Kirill A. Shutemov wrote:
>>> On Wed, Jul 24, 2019 at 05:34:26PM +0000, Lendacky, Thomas wrote:
>>>> On 7/24/19 12:06 PM, Robin Murphy wrote:
>>>>> On 24/07/2019 17:42, Lendacky, Thomas wrote:
>>>>>> On 7/24/19 10:55 AM, Kirill A. Shutemov wrote:
>>>>>>> On Wed, Jul 10, 2019 at 07:01:19PM +0000, Lendacky, Thomas wrote:
>>>>>>>> @@ -351,6 +355,32 @@ bool sev_active(void)
>>>>>>>>   }
>>>>>>>>   EXPORT_SYMBOL(sev_active);
>>>>>>>>   +/* Override for DMA direct allocation check -
>>>>>>>> ARCH_HAS_FORCE_DMA_UNENCRYPTED */
>>>>>>>> +bool force_dma_unencrypted(struct device *dev)
>>>>>>>> +{
>>>>>>>> +    /*
>>>>>>>> +     * For SEV, all DMA must be to unencrypted addresses.
>>>>>>>> +     */
>>>>>>>> +    if (sev_active())
>>>>>>>> +        return true;
>>>>>>>> +
>>>>>>>> +    /*
>>>>>>>> +     * For SME, all DMA must be to unencrypted addresses if the
>>>>>>>> +     * device does not support DMA to addresses that include the
>>>>>>>> +     * encryption mask.
>>>>>>>> +     */
>>>>>>>> +    if (sme_active()) {
>>>>>>>> +        u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
>>>>>>>> +        u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
>>>>>>>> +                        dev->bus_dma_mask);
>>>>>>>> +
>>>>>>>> +        if (dma_dev_mask <= dma_enc_mask)
>>>>>>>> +            return true;
>>>>>>>
>>>>>>> Hm. What is wrong with the dev mask being equal to enc mask? IIUC, it
>>>>>>> means that device mask is wide enough to cover encryption bit, doesn't it?
>>>>>>
>>>>>> Not really...  it's the way DMA_BIT_MASK works vs bit numbering. Let's say
>>>>>> that sme_me_mask has bit 47 set. __ffs64 returns 47 and DMA_BIT_MASK(47)
>>>>>> will generate a mask without bit 47 set (0x7fffffffffff). So the check
>>>>>> will catch anything that does not support at least 48-bit DMA.
>>>>>
>>>>> Couldn't that be expressed as just:
>>>>>
>>>>>     if (sme_me_mask & dma_dev_mask == sme_me_mask)
>>>>
>>>> Actually !=, but yes, it could have been done like that, I just didn't
>>>> think of it.
>>>
>>> I'm looking into generalizing the check to cover MKTME.
>>>
>>> Leaving off the Kconfig changes and moving the check to another file, does
>>> the change below look reasonable to you. It's only build tested so far.
>>>
>>> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
>>> index fece30ca8b0c..6c86adcd02da 100644
>>> --- a/arch/x86/mm/mem_encrypt.c
>>> +++ b/arch/x86/mm/mem_encrypt.c
>>> @@ -355,6 +355,8 @@ EXPORT_SYMBOL(sev_active);
>>>  /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
>>>  bool force_dma_unencrypted(struct device *dev)
>>>  {
>>> +	u64 dma_enc_mask;
>>> +
>>>  	/*
>>>  	 * For SEV, all DMA must be to unencrypted addresses.
>>>  	 */
>>> @@ -362,18 +364,20 @@ bool force_dma_unencrypted(struct device *dev)
>>>  		return true;
>>>  
>>>  	/*
>>> -	 * For SME, all DMA must be to unencrypted addresses if the
>>> -	 * device does not support DMA to addresses that include the
>>> -	 * encryption mask.
>>> +	 * For SME and MKTME, all DMA must be to unencrypted addresses if the
>>> +	 * device does not support DMA to addresses that include the encryption
>>> +	 * mask.
>>>  	 */
>>> -	if (sme_active()) {
>>> -		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
>>> -		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
>>> -						dev->bus_dma_mask);
>>> +	if (!sme_active() && !mktme_enabled())
>>> +		return false;
>>>  
>>> -		if (dma_dev_mask <= dma_enc_mask)
>>> -			return true;
>>> -	}
>>> +	dma_enc_mask = sme_me_mask | mktme_keyid_mask();
>>> +
>>> +	if (dev->coherent_dma_mask && (dev->coherent_dma_mask & dma_enc_mask) != dma_enc_mask)
>>> +		return true;
>>> +
>>> +	if (dev->bus_dma_mask && (dev->bus_dma_mask & dma_enc_mask) != dma_enc_mask)
>>> +		return true;
>>
>> Do you want to err on the side of caution and return true if both masks
>> are zero? You could do the min_not_zero step and then return true if the
>> result is zero. Then just make the one comparison against dma_enc_mask.
> 
> Something like this?

Yup, looks good.

Thanks,
Tom

> 
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index fece30ca8b0c..173d68b08c55 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -355,6 +355,8 @@ EXPORT_SYMBOL(sev_active);
>  /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
>  bool force_dma_unencrypted(struct device *dev)
>  {
> +	u64 dma_enc_mask, dma_dev_mask;
> +
>  	/*
>  	 * For SEV, all DMA must be to unencrypted addresses.
>  	 */
> @@ -362,20 +364,17 @@ bool force_dma_unencrypted(struct device *dev)
>  		return true;
>  
>  	/*
> -	 * For SME, all DMA must be to unencrypted addresses if the
> -	 * device does not support DMA to addresses that include the
> -	 * encryption mask.
> +	 * For SME and MKTME, all DMA must be to unencrypted addresses if the
> +	 * device does not support DMA to addresses that include the encryption
> +	 * mask.
>  	 */
> -	if (sme_active()) {
> -		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
> -		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
> -						dev->bus_dma_mask);
> +	if (!sme_active() && !mktme_enabled())
> +		return false;
>  
> -		if (dma_dev_mask <= dma_enc_mask)
> -			return true;
> -	}
> +	dma_enc_mask = sme_me_mask | mktme_keyid_mask();
> +	dma_dev_mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
>  
> -	return false;
> +	return (dma_dev_mask & dma_enc_mask) != dma_enc_mask;
>  }
>  
>  /* Architecture __weak replacement functions */
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2019-07-24 18:49 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-07-10 19:01 [PATCH] dma-direct: Force unencrypted DMA under SME for certain DMA masks Lendacky, Thomas
2019-07-11 10:05 ` Christoph Hellwig
2019-07-11 12:18   ` Thomas Gleixner
2019-07-11 12:16 ` Thomas Gleixner
2019-07-24 15:55 ` Kirill A. Shutemov
2019-07-24 16:42   ` Lendacky, Thomas
2019-07-24 17:06     ` Robin Murphy
2019-07-24 17:34       ` Lendacky, Thomas
2019-07-24 18:11         ` Kirill A. Shutemov
2019-07-24 18:30           ` Lendacky, Thomas
2019-07-24 18:40             ` Kirill A. Shutemov
2019-07-24 18:49               ` Lendacky, Thomas

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).