From: Tom Lendacky <thomas.lendacky@amd.com>
To: Borislav Petkov <bp@alien8.de>
Cc: linux-arch@vger.kernel.org, linux-efi@vger.kernel.org,
	kvm@vger.kernel.org, linux-doc@vger.kernel.org, x86@kernel.org,
	linux-kernel@vger.kernel.org, kasan-dev@googlegroups.com,
	linux-mm@kvack.org, iommu@lists.linux-foundation.org,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Arnd Bergmann" <arnd@arndb.de>,
	"Jonathan Corbet" <corbet@lwn.net>,
	"Matt Fleming" <matt@codeblueprint.co.uk>,
	"Joerg Roedel" <joro@8bytes.org>,
	"Konrad Rzeszutek Wilk" <konrad.wilk@oracle.com>,
	"Andrey Ryabinin" <aryabinin@virtuozzo.com>,
	"Ingo Molnar" <mingo@redhat.com>,
	"Andy Lutomirski" <luto@kernel.org>,
	"H. Peter Anvin" <hpa@zytor.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Alexander Potapenko" <glider@google.com>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	"Dmitry Vyukov" <dvyukov@google.com>
Subject: Re: [RFC PATCH v2 14/20] x86: DMA support for memory encryption
Date: Wed, 14 Sep 2016 08:36:30 -0500
Message-ID: <cb3e6b91-44f7-ee01-6da1-82eb32243b85@amd.com>
In-Reply-To: <20160912105815.3z5bvzbcfjcj4ku7@pd.tnic>

On 09/12/2016 05:58 AM, Borislav Petkov wrote:
> On Mon, Aug 22, 2016 at 05:38:07PM -0500, Tom Lendacky wrote:
>> Since DMA addresses will effectively look like 48-bit addresses when the
>> memory encryption mask is set, SWIOTLB is needed if the DMA mask of the
>> device performing the DMA does not support 48-bits. SWIOTLB will be
>> initialized to create un-encrypted bounce buffers for use by these devices.
>>
>> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
>> ---
>>  arch/x86/include/asm/dma-mapping.h |    5 ++-
>>  arch/x86/include/asm/mem_encrypt.h |    6 +++
>>  arch/x86/kernel/pci-dma.c          |   11 ++++--
>>  arch/x86/kernel/pci-nommu.c        |    2 +
>>  arch/x86/kernel/pci-swiotlb.c      |    8 +++--
>>  arch/x86/mm/mem_encrypt.c          |   22 ++++++++++++
>>  include/linux/swiotlb.h            |    1 +
>>  init/main.c                        |   13 +++++++
>>  lib/swiotlb.c                      |   64 ++++++++++++++++++++++++++++++++----
>>  9 files changed, 115 insertions(+), 17 deletions(-)
> 
> ...
> 
>> @@ -172,3 +174,23 @@ void __init sme_early_init(void)
>>  	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
>>  		protection_map[i] = __pgprot(pgprot_val(protection_map[i]) | sme_me_mask);
>>  }
>> +
>> +/* Architecture __weak replacement functions */
>> +void __init mem_encrypt_init(void)
>> +{
>> +	if (!sme_me_mask)
>> +		return;
>> +
>> +	/* Make SWIOTLB use an unencrypted DMA area */
>> +	swiotlb_clear_encryption();
>> +}
>> +
>> +unsigned long swiotlb_get_me_mask(void)
> 
> This could just as well be named to something more generic:
> 
> swiotlb_get_clear_dma_mask() or so which basically means the mask of
> bits which get cleared before returning DMA addresses...

Ok.
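
Something along these lines, then (just a sketch, the exact name can
still change):

unsigned long __weak swiotlb_get_clear_dma_mask(void)
{
	/* Default: no address bits to clear */
	return 0;
}

/* x86 override in arch/x86/mm/mem_encrypt.c returns the SME mask */
unsigned long swiotlb_get_clear_dma_mask(void)
{
	return sme_me_mask;
}

/* lib/swiotlb.c then strips those bits from swiotlb DMA addresses */
static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
				      phys_addr_t address)
{
	return phys_to_dma(hwdev, address) & ~swiotlb_get_clear_dma_mask();
}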

> 
>> +{
>> +	return sme_me_mask;
>> +}
>> +
>> +void swiotlb_set_mem_dec(void *vaddr, unsigned long size)
>> +{
>> +	sme_set_mem_dec(vaddr, size);
>> +}
>> diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
>> index 5f81f8a..5c909fc 100644
>> --- a/include/linux/swiotlb.h
>> +++ b/include/linux/swiotlb.h
>> @@ -29,6 +29,7 @@ int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
>>  extern unsigned long swiotlb_nr_tbl(void);
>>  unsigned long swiotlb_size_or_default(void);
>>  extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
>> +extern void __init swiotlb_clear_encryption(void);
>>  
>>  /*
>>   * Enumeration for sync targets
>> diff --git a/init/main.c b/init/main.c
>> index a8a58e2..82c7cd9 100644
>> --- a/init/main.c
>> +++ b/init/main.c
>> @@ -458,6 +458,10 @@ void __init __weak thread_stack_cache_init(void)
>>  }
>>  #endif
>>  
>> +void __init __weak mem_encrypt_init(void)
>> +{
>> +}
>> +
>>  /*
>>   * Set up kernel memory allocators
>>   */
>> @@ -598,6 +602,15 @@ asmlinkage __visible void __init start_kernel(void)
>>  	 */
>>  	locking_selftest();
>>  
>> +	/*
>> +	 * This needs to be called before any devices perform DMA
>> +	 * operations that might use the swiotlb bounce buffers.
>> +	 * This call will mark the bounce buffers as un-encrypted so
>> +	 * that the usage of them will not cause "plain-text" data
> 
> 	...that their usage will not cause ...

Ok.

> 
>> +	 * to be decrypted when accessed.
>> +	 */
>> +	mem_encrypt_init();
>> +
>>  #ifdef CONFIG_BLK_DEV_INITRD
>>  	if (initrd_start && !initrd_below_start_ok &&
>>  	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
>> diff --git a/lib/swiotlb.c b/lib/swiotlb.c
>> index 22e13a0..15d5741 100644
>> --- a/lib/swiotlb.c
>> +++ b/lib/swiotlb.c
>> @@ -131,6 +131,26 @@ unsigned long swiotlb_size_or_default(void)
>>  	return size ? size : (IO_TLB_DEFAULT_SIZE);
>>  }
>>  
>> +/*
>> + * Support for memory encryption. If memory encryption is supported, then an
>> + * override to these functions will be provided.
>> + */
> 
> No need for that comment.

Ok.

> 
>> +unsigned long __weak swiotlb_get_me_mask(void)
>> +{
>> +	return 0;
>> +}
>> +
>> +void __weak swiotlb_set_mem_dec(void *vaddr, unsigned long size)
>> +{
>> +}
>> +
>> +/* For swiotlb, clear memory encryption mask from dma addresses */
>> +static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
>> +				      phys_addr_t address)
>> +{
>> +	return phys_to_dma(hwdev, address) & ~swiotlb_get_me_mask();
>> +}
>> +
>>  /* Note that this doesn't work with highmem page */
>>  static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
>>  				      volatile void *address)
>> @@ -159,6 +179,30 @@ void swiotlb_print_info(void)
>>  	       bytes >> 20, vstart, vend - 1);
>>  }
>>  
>> +/*
>> + * If memory encryption is active, the DMA address for an encrypted page may
>> + * be beyond the range of the device. If bounce buffers are required be sure
>> + * that they are not on an encrypted page. This should be called before the
>> + * iotlb area is used.
>> + */
>> +void __init swiotlb_clear_encryption(void)
>> +{
>> +	void *vaddr;
>> +	unsigned long bytes;
>> +
>> +	if (no_iotlb_memory || !io_tlb_start || late_alloc)
>> +		return;
>> +
>> +	vaddr = phys_to_virt(io_tlb_start);
>> +	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
>> +	swiotlb_set_mem_dec(vaddr, bytes);
>> +	memset(vaddr, 0, bytes);
> 
> io_tlb_start is cleared...
> 
>> +
>> +	vaddr = phys_to_virt(io_tlb_overflow_buffer);
>> +	bytes = PAGE_ALIGN(io_tlb_overflow);
>> +	swiotlb_set_mem_dec(vaddr, bytes);
> 
> ... but io_tlb_overflow_buffer isn't? I don't see the difference here.

Yup, I missed that one.  Will memset this as well.
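
So swiotlb_clear_encryption() would end up looking something like
this (untested sketch):

void __init swiotlb_clear_encryption(void)
{
	void *vaddr;
	unsigned long bytes;

	if (no_iotlb_memory || !io_tlb_start || late_alloc)
		return;

	/* Mark the main bounce buffer area un-encrypted and clear it */
	vaddr = phys_to_virt(io_tlb_start);
	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
	swiotlb_set_mem_dec(vaddr, bytes);
	memset(vaddr, 0, bytes);

	/* Do the same for the overflow buffer */
	vaddr = phys_to_virt(io_tlb_overflow_buffer);
	bytes = PAGE_ALIGN(io_tlb_overflow);
	swiotlb_set_mem_dec(vaddr, bytes);
	memset(vaddr, 0, bytes);
}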

Thanks,
Tom

> 
>> +}
>> +
>>  int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
>>  {
>>  	void *v_overflow_buffer;

