From: Tom Lendacky <thomas.lendacky@amd.com>
To: Christoph Hellwig <hch@lst.de>, x86@kernel.org
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
	David Woodhouse <dwmw2@infradead.org>,
	Muli Ben-Yehuda <mulix@mulix.org>, Jon Mason <jdmason@kudzu.us>,
	Joerg Roedel <joro@8bytes.org>,
	iommu@lists.linux-foundation.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH 13/14] dma-direct: handle force decryption for dma coherent buffers in common code
Date: Mon, 19 Mar 2018 09:51:46 -0500	[thread overview]
Message-ID: <aa92adb4-2d93-831c-3f62-9f7281e18fc9@amd.com> (raw)
In-Reply-To: <20180319103826.12853-14-hch@lst.de>

On 3/19/2018 5:38 AM, Christoph Hellwig wrote:
> With that in place the generic dma-direct routines can be used to
> allocate non-encrypted bounce buffers, and the x86 SEV case can use
> the generic swiotlb ops including nice features such as using CMA
> allocations.
> 
> Note that I'm not too happy about using sev_active() in dma-direct, but
> I couldn't come up with a good enough name for a wrapper to make it
> worth adding.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>

> ---
>  arch/x86/mm/mem_encrypt.c | 73 ++---------------------------------------------
>  lib/dma-direct.c          | 32 +++++++++++++++++----
>  2 files changed, 29 insertions(+), 76 deletions(-)
> 
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index 1a05bea831a8..65f45e0ef496 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -200,58 +200,6 @@ void __init sme_early_init(void)
>  		swiotlb_force = SWIOTLB_FORCE;
>  }
>  
> -static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
> -		       gfp_t gfp, unsigned long attrs)
> -{
> -	unsigned int order;
> -	struct page *page;
> -	void *vaddr = NULL;
> -
> -	order = get_order(size);
> -	page = alloc_pages_node(dev_to_node(dev), gfp, order);
> -	if (page) {
> -		dma_addr_t addr;
> -
> -		/*
> -		 * Since we will be clearing the encryption bit, check the
> -		 * mask with it already cleared.
> -		 */
> -		addr = __phys_to_dma(dev, page_to_phys(page));
> -		if ((addr + size) > dev->coherent_dma_mask) {
> -			__free_pages(page, get_order(size));
> -		} else {
> -			vaddr = page_address(page);
> -			*dma_handle = addr;
> -		}
> -	}
> -
> -	if (!vaddr)
> -		vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
> -
> -	if (!vaddr)
> -		return NULL;
> -
> -	/* Clear the SME encryption bit for DMA use if not swiotlb area */
> -	if (!is_swiotlb_buffer(dma_to_phys(dev, *dma_handle))) {
> -		set_memory_decrypted((unsigned long)vaddr, 1 << order);
> -		memset(vaddr, 0, PAGE_SIZE << order);
> -		*dma_handle = __sme_clr(*dma_handle);
> -	}
> -
> -	return vaddr;
> -}
> -
> -static void sev_free(struct device *dev, size_t size, void *vaddr,
> -		     dma_addr_t dma_handle, unsigned long attrs)
> -{
> -	/* Set the SME encryption bit for re-use if not swiotlb area */
> -	if (!is_swiotlb_buffer(dma_to_phys(dev, dma_handle)))
> -		set_memory_encrypted((unsigned long)vaddr,
> -				     1 << get_order(size));
> -
> -	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
> -}
> -
>  static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
>  {
>  	pgprot_t old_prot, new_prot;
> @@ -404,20 +352,6 @@ bool sev_active(void)
>  }
>  EXPORT_SYMBOL(sev_active);
>  
> -static const struct dma_map_ops sev_dma_ops = {
> -	.alloc                  = sev_alloc,
> -	.free                   = sev_free,
> -	.map_page               = swiotlb_map_page,
> -	.unmap_page             = swiotlb_unmap_page,
> -	.map_sg                 = swiotlb_map_sg_attrs,
> -	.unmap_sg               = swiotlb_unmap_sg_attrs,
> -	.sync_single_for_cpu    = swiotlb_sync_single_for_cpu,
> -	.sync_single_for_device = swiotlb_sync_single_for_device,
> -	.sync_sg_for_cpu        = swiotlb_sync_sg_for_cpu,
> -	.sync_sg_for_device     = swiotlb_sync_sg_for_device,
> -	.mapping_error          = swiotlb_dma_mapping_error,
> -};
> -
>  /* Architecture __weak replacement functions */
>  void __init mem_encrypt_init(void)
>  {
> @@ -428,12 +362,11 @@ void __init mem_encrypt_init(void)
>  	swiotlb_update_mem_attributes();
>  
>  	/*
> -	 * With SEV, DMA operations cannot use encryption. New DMA ops
> -	 * are required in order to mark the DMA areas as decrypted or
> -	 * to use bounce buffers.
> +	 * With SEV, DMA operations cannot use encryption, so we need to
> +	 * use SWIOTLB to bounce-buffer DMA operations.
>  	 */
>  	if (sev_active())
> -		dma_ops = &sev_dma_ops;
> +		dma_ops = &swiotlb_dma_ops;
>  
>  	/*
>  	 * With SEV, we need to unroll the rep string I/O instructions.
> diff --git a/lib/dma-direct.c b/lib/dma-direct.c
> index c9e8e21cb334..1277d293d4da 100644
> --- a/lib/dma-direct.c
> +++ b/lib/dma-direct.c
> @@ -9,6 +9,7 @@
>  #include <linux/scatterlist.h>
>  #include <linux/dma-contiguous.h>
>  #include <linux/pfn.h>
> +#include <linux/set_memory.h>
>  
>  #define DIRECT_MAPPING_ERROR		0
>  
> @@ -20,6 +21,14 @@
>  #define ARCH_ZONE_DMA_BITS 24
>  #endif
>  
> +/*
> + * For AMD SEV all DMA must be to unencrypted addresses.
> + */
> +static inline bool force_dma_unencrypted(void)
> +{
> +	return sev_active();
> +}
> +
>  static bool
>  check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
>  		const char *caller)
> @@ -37,7 +46,9 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
>  
>  static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
>  {
> -	return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
> +	dma_addr_t addr = force_dma_unencrypted() ?
> +		__phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
> +	return addr + size - 1 <= dev->coherent_dma_mask;
>  }
>  
>  void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
> @@ -46,6 +57,7 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
>  	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
>  	int page_order = get_order(size);
>  	struct page *page = NULL;
> +	void *ret;
>  
>  	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
>  	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
> @@ -78,10 +90,15 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
>  
>  	if (!page)
>  		return NULL;
> -
> -	*dma_handle = phys_to_dma(dev, page_to_phys(page));
> -	memset(page_address(page), 0, size);
> -	return page_address(page);
> +	ret = page_address(page);
> +	if (force_dma_unencrypted()) {
> +		set_memory_decrypted((unsigned long)ret, 1 << page_order);
> +		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
> +	} else {
> +		*dma_handle = phys_to_dma(dev, page_to_phys(page));
> +	}
> +	memset(ret, 0, size);
> +	return ret;
>  }
>  
>  /*
> @@ -92,9 +109,12 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
>  		dma_addr_t dma_addr, unsigned long attrs)
>  {
>  	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> +	unsigned int page_order = get_order(size);
>  
> +	if (force_dma_unencrypted())
> +		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
>  	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
> -		free_pages((unsigned long)cpu_addr, get_order(size));
> +		free_pages((unsigned long)cpu_addr, page_order);
>  }
>  
>  static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
> 
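
For context on the two address helpers used above: phys_to_dma() ORs the
memory encryption bit into the returned bus address, while __phys_to_dma()
leaves it clear. A minimal sketch of that relationship, assuming the
include/linux/dma-direct.h and include/linux/mem_encrypt.h definitions of
this era (illustration only, not part of the patch):

	/*
	 * sme_me_mask holds the memory encryption bit (typically bit 47
	 * on EPYC); it is zero when SME/SEV is not active.
	 */
	#define __sme_set(x)	((x) | sme_me_mask)
	#define __sme_clr(x)	((x) & ~sme_me_mask)

	/* Encrypted view of a bus address: encryption bit set. */
	static inline dma_addr_t phys_to_dma(struct device *dev,
					     phys_addr_t paddr)
	{
		return __sme_set(__phys_to_dma(dev, paddr));
	}

This is also why dma_coherent_ok() must test the __phys_to_dma() value
when force_dma_unencrypted() is true: with a high bit such as bit 47 ORed
in, every allocation would appear to overflow a 32-bit coherent_dma_mask.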

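To illustrate the driver-visible result (hypothetical snippet, not part of
this series): nothing changes at the DMA API level. On an SEV guest a plain
dma_alloc_coherent() call now reaches dma_direct_alloc(), which decrypts
the backing pages via set_memory_decrypted() and hands back a bus address
with the encryption bit clear.

	#include <linux/dma-mapping.h>

	/* Hypothetical consumer: allocate a ring buffer for a device. */
	static int example_setup_ring(struct device *dev)
	{
		dma_addr_t ring_dma;
		void *ring;

		/* Decrypted automatically under SEV by the common code. */
		ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma,
					  GFP_KERNEL);
		if (!ring)
			return -ENOMEM;

		/* ... program ring_dma into the device ... */

		dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
		return 0;
	}
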
Thread overview: 91+ messages
2018-03-19 10:38 use generic dma-direct and swiotlb code for x86 V3 Christoph Hellwig
2018-03-19 10:38 ` [PATCH 01/14] x86: remove X86_PPRO_FENCE Christoph Hellwig
2018-03-20 11:04   ` [tip:x86/pti] x86/cpu: Remove the CONFIG_X86_PPRO_FENCE=y quirk tip-bot for Christoph Hellwig
2018-03-20 12:51     ` Peter Zijlstra
2018-03-19 10:38 ` [PATCH 02/14] x86: remove dma_alloc_coherent_mask Christoph Hellwig
2018-03-23 19:48   ` [tip:x86/dma] x86/dma: Remove dma_alloc_coherent_mask() tip-bot for Christoph Hellwig
2018-03-19 10:38 ` [PATCH 03/14] x86: use dma-direct Christoph Hellwig
2018-03-23 19:49   ` [tip:x86/dma] x86/dma: Use DMA-direct (CONFIG_DMA_DIRECT_OPS=y) tip-bot for Christoph Hellwig
2018-03-19 10:38 ` [PATCH 04/14] x86: use generic swiotlb_ops Christoph Hellwig
2018-03-23 19:49   ` [tip:x86/dma] x86/dma: Use generic swiotlb_ops tip-bot for Christoph Hellwig
2018-03-19 10:38 ` [PATCH 05/14] x86/amd_gart: look at coherent_dma_mask instead of GFP_DMA Christoph Hellwig
2018-03-23 19:50   ` [tip:x86/dma] x86/dma/amd_gart: Look at dev->coherent_dma_mask instead of GFP_DMA tip-bot for Christoph Hellwig
2018-03-19 10:38 ` [PATCH 06/14] x86/amd_gart: use dma_direct_{alloc,free} Christoph Hellwig
2018-03-23 19:50   ` [tip:x86/dma] x86/dma/amd_gart: Use dma_direct_{alloc,free}() tip-bot for Christoph Hellwig
2018-03-19 10:38 ` [PATCH 07/14] iommu/amd_iommu: use dma_direct_{alloc,free} Christoph Hellwig
2018-03-23 19:51   ` [tip:x86/dma] iommu/amd_iommu: Use CONFIG_DMA_DIRECT_OPS=y and dma_direct_{alloc,free}() tip-bot for Christoph Hellwig
2018-03-19 10:38 ` [PATCH 08/14] iommu/intel-iommu: cleanup intel_{alloc,free}_coherent Christoph Hellwig
2018-03-23 19:51   ` [tip:x86/dma] iommu/intel-iommu: Enable CONFIG_DMA_DIRECT_OPS=y and clean up intel_{alloc,free}_coherent() tip-bot for Christoph Hellwig
2018-03-19 10:38 ` [PATCH 09/14] x86: remove dma_alloc_coherent_gfp_flags Christoph Hellwig
2018-03-23 19:52   ` [tip:x86/dma] x86/dma: Remove dma_alloc_coherent_gfp_flags() tip-bot for Christoph Hellwig
2018-03-19 10:38 ` [PATCH 10/14] set_memory.h: provide set_memory_{en,de}crypted stubs Christoph Hellwig
2018-03-19 14:21   ` Tom Lendacky
2018-03-23 19:52   ` [tip:x86/dma] set_memory.h: Provide set_memory_{en,de}crypted() stubs tip-bot for Christoph Hellwig
2018-03-19 10:38 ` [PATCH 11/14] swiotlb: remove swiotlb_set_mem_attributes Christoph Hellwig
2018-03-19 14:41   ` Tom Lendacky
2018-03-23 19:52   ` [tip:x86/dma] dma/swiotlb: Remove swiotlb_set_mem_attributes() tip-bot for Christoph Hellwig
2018-03-19 10:38 ` [PATCH 12/14] dma-direct: handle the memory encryption bit in common code Christoph Hellwig
2018-03-19 14:50   ` Tom Lendacky
2018-03-19 15:19   ` Robin Murphy
2018-03-19 15:24     ` Christoph Hellwig
2018-03-19 15:37       ` Robin Murphy
2018-03-19 15:48         ` Will Deacon
2018-03-19 16:03           ` Christoph Hellwig
2018-03-19 16:55             ` Will Deacon
2018-03-19 18:01             ` Catalin Marinas
2018-03-19 19:49               ` Christoph Hellwig
2018-03-20 16:23                 ` Catalin Marinas
2018-03-23 19:53   ` [tip:x86/dma] dma/direct: Handle the memory encryption bit in common code tip-bot for Christoph Hellwig
2018-03-19 10:38 ` [PATCH 13/14] dma-direct: handle force decryption for dma coherent buffers in common code Christoph Hellwig
2018-03-19 14:51   ` Tom Lendacky [this message]
2018-03-23 19:53   ` [tip:x86/dma] dma/direct: Handle force decryption for DMA coherent buffers in common code tip-bot for Christoph Hellwig
2018-03-19 10:38 ` [PATCH 14/14] swiotlb: remove swiotlb_{alloc,free}_coherent Christoph Hellwig
2018-03-23 19:54   ` [tip:x86/dma] dma/swiotlb: Remove swiotlb_{alloc,free}_coherent() tip-bot for Christoph Hellwig
2018-03-19 14:00 ` use generic dma-direct and swiotlb code for x86 V3 Tom Lendacky
2018-03-19 14:56 ` Thomas Gleixner
2018-03-19 15:27 ` Konrad Rzeszutek Wilk
2018-03-19 15:28   ` Christoph Hellwig
2018-03-20  8:37     ` Ingo Molnar
2018-03-20  8:44       ` Christoph Hellwig
2018-03-20  9:03         ` Ingo Molnar
2018-03-20 11:25           ` Konrad Rzeszutek Wilk
2018-03-20 15:16             ` Christoph Hellwig
2018-03-21 14:32               ` Konrad Rzeszutek Wilk
  -- strict thread matches above, loose matches on Subject: below --
2018-03-14 17:51 use generic dma-direct and swiotlb code for x86 V2 Christoph Hellwig
2018-03-14 17:52 ` [PATCH 13/14] dma-direct: handle force decryption for dma coherent buffers in common code Christoph Hellwig
