From: Tom Lendacky <thomas.lendacky@amd.com>
To: Christoph Hellwig <hch@lst.de>, x86@kernel.org
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
	David Woodhouse <dwmw2@infradead.org>,
	Muli Ben-Yehuda <mulix@mulix.org>, Jon Mason <jdmason@kudzu.us>,
	Joerg Roedel <joro@8bytes.org>,
	iommu@lists.linux-foundation.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH 12/14] dma-direct: handle the memory encryption bit in common code
Date: Mon, 19 Mar 2018 09:50:27 -0500
Message-ID: <6d05efef-3e2a-5875-aed6-0680d5b5e5be@amd.com>
In-Reply-To: <20180319103826.12853-13-hch@lst.de>

On 3/19/2018 5:38 AM, Christoph Hellwig wrote:
> Give the basic phys_to_dma and dma_to_phys helpers a __-prefix and add
> the memory encryption mask to the non-prefixed versions.  Use the
> __-prefixed versions directly instead of clearing the mask again in
> various places.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
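
For readers following the series, a minimal sketch of the intended usage
after this change (the two wrapper functions below are hypothetical and
only for illustration; phys_to_dma()/__phys_to_dma() themselves come from
the patch):

#include <linux/dma-direct.h>

/*
 * Illustration only, not part of the patch: a driver mapping ordinary
 * (encrypted) system memory keeps calling phys_to_dma(), which now ORs
 * in the memory encryption mask, while code handing out unencrypted
 * shared buffers (e.g. the swiotlb bounce pool) calls the raw helper so
 * that no encryption bit ends up in the bus address.
 */
static dma_addr_t example_map_encrypted(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, paddr);		/* __sme_set(__phys_to_dma(dev, paddr)) */
}

static dma_addr_t example_map_unencrypted(struct device *dev, phys_addr_t paddr)
{
	return __phys_to_dma(dev, paddr);	/* no encryption mask applied */
}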

> ---
>  arch/arm/include/asm/dma-direct.h                  |  4 ++--
>  arch/mips/cavium-octeon/dma-octeon.c               | 10 ++++-----
>  .../include/asm/mach-cavium-octeon/dma-coherence.h |  4 ++--
>  .../include/asm/mach-loongson64/dma-coherence.h    | 10 ++++-----
>  arch/mips/loongson64/common/dma-swiotlb.c          |  4 ++--
>  arch/powerpc/include/asm/dma-direct.h              |  4 ++--
>  arch/x86/Kconfig                                   |  2 +-
>  arch/x86/include/asm/dma-direct.h                  | 25 ++--------------------
>  arch/x86/mm/mem_encrypt.c                          |  2 +-
>  arch/x86/pci/sta2x11-fixup.c                       |  6 +++---
>  include/linux/dma-direct.h                         | 21 ++++++++++++++++--
>  lib/swiotlb.c                                      | 25 ++++++++--------------
>  12 files changed, 53 insertions(+), 64 deletions(-)
> 
> diff --git a/arch/arm/include/asm/dma-direct.h b/arch/arm/include/asm/dma-direct.h
> index 5b0a8a421894..b67e5fc1fe43 100644
> --- a/arch/arm/include/asm/dma-direct.h
> +++ b/arch/arm/include/asm/dma-direct.h
> @@ -2,13 +2,13 @@
>  #ifndef ASM_ARM_DMA_DIRECT_H
>  #define ASM_ARM_DMA_DIRECT_H 1
>  
> -static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
> +static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
>  {
>  	unsigned int offset = paddr & ~PAGE_MASK;
>  	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
>  }
>  
> -static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
> +static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
>  {
>  	unsigned int offset = dev_addr & ~PAGE_MASK;
>  	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
> diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
> index c7bb8a407041..7b335ab21697 100644
> --- a/arch/mips/cavium-octeon/dma-octeon.c
> +++ b/arch/mips/cavium-octeon/dma-octeon.c
> @@ -10,7 +10,7 @@
>   * IP32 changes by Ilya.
>   * Copyright (C) 2010 Cavium Networks, Inc.
>   */
> -#include <linux/dma-mapping.h>
> +#include <linux/dma-direct.h>
>  #include <linux/scatterlist.h>
>  #include <linux/bootmem.h>
>  #include <linux/export.h>
> @@ -182,7 +182,7 @@ struct octeon_dma_map_ops {
>  	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
>  };
>  
> -dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
> +dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
>  {
>  	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
>  						      struct octeon_dma_map_ops,
> @@ -190,9 +190,9 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
>  
>  	return ops->phys_to_dma(dev, paddr);
>  }
> -EXPORT_SYMBOL(phys_to_dma);
> +EXPORT_SYMBOL(__phys_to_dma);
>  
> -phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
> +phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
>  {
>  	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
>  						      struct octeon_dma_map_ops,
> @@ -200,7 +200,7 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
>  
>  	return ops->dma_to_phys(dev, daddr);
>  }
> -EXPORT_SYMBOL(dma_to_phys);
> +EXPORT_SYMBOL(__dma_to_phys);
>  
>  static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
>  	.dma_map_ops = {
> diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
> index 138edf6b5b48..6eb1ee548b11 100644
> --- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
> +++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
> @@ -69,8 +69,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
>  	return addr + size - 1 <= *dev->dma_mask;
>  }
>  
> -dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
> -phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
> +dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
> +phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
>  
>  struct dma_map_ops;
>  extern const struct dma_map_ops *octeon_pci_dma_map_ops;
> diff --git a/arch/mips/include/asm/mach-loongson64/dma-coherence.h b/arch/mips/include/asm/mach-loongson64/dma-coherence.h
> index b1b575f5c6c1..64fc44dec0a8 100644
> --- a/arch/mips/include/asm/mach-loongson64/dma-coherence.h
> +++ b/arch/mips/include/asm/mach-loongson64/dma-coherence.h
> @@ -25,13 +25,13 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
>  	return addr + size - 1 <= *dev->dma_mask;
>  }
>  
> -extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
> -extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
> +extern dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
> +extern phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
>  static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
>  					  size_t size)
>  {
>  #ifdef CONFIG_CPU_LOONGSON3
> -	return phys_to_dma(dev, virt_to_phys(addr));
> +	return __phys_to_dma(dev, virt_to_phys(addr));
>  #else
>  	return virt_to_phys(addr) | 0x80000000;
>  #endif
> @@ -41,7 +41,7 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
>  					       struct page *page)
>  {
>  #ifdef CONFIG_CPU_LOONGSON3
> -	return phys_to_dma(dev, page_to_phys(page));
> +	return __phys_to_dma(dev, page_to_phys(page));
>  #else
>  	return page_to_phys(page) | 0x80000000;
>  #endif
> @@ -51,7 +51,7 @@ static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
>  	dma_addr_t dma_addr)
>  {
>  #if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
> -	return dma_to_phys(dev, dma_addr);
> +	return __dma_to_phys(dev, dma_addr);
>  #elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
>  	return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
>  #else
> diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
> index 7bbcf89475f3..6a739f8ae110 100644
> --- a/arch/mips/loongson64/common/dma-swiotlb.c
> +++ b/arch/mips/loongson64/common/dma-swiotlb.c
> @@ -63,7 +63,7 @@ static int loongson_dma_supported(struct device *dev, u64 mask)
>  	return swiotlb_dma_supported(dev, mask);
>  }
>  
> -dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
> +dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
>  {
>  	long nid;
>  #ifdef CONFIG_PHYS48_TO_HT40
> @@ -75,7 +75,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
>  	return paddr;
>  }
>  
> -phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
> +phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
>  {
>  	long nid;
>  #ifdef CONFIG_PHYS48_TO_HT40
> diff --git a/arch/powerpc/include/asm/dma-direct.h b/arch/powerpc/include/asm/dma-direct.h
> index a5b59c765426..7702875aabb7 100644
> --- a/arch/powerpc/include/asm/dma-direct.h
> +++ b/arch/powerpc/include/asm/dma-direct.h
> @@ -17,12 +17,12 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
>  	return addr + size - 1 <= *dev->dma_mask;
>  }
>  
> -static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
> +static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
>  {
>  	return paddr + get_dma_offset(dev);
>  }
>  
> -static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
> +static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
>  {
>  	return daddr - get_dma_offset(dev);
>  }
> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index 10f482beda1b..1ca4f0874517 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -54,7 +54,6 @@ config X86
>  	select ARCH_HAS_FORTIFY_SOURCE
>  	select ARCH_HAS_GCOV_PROFILE_ALL
>  	select ARCH_HAS_KCOV			if X86_64
> -	select ARCH_HAS_PHYS_TO_DMA
>  	select ARCH_HAS_MEMBARRIER_SYNC_CORE
>  	select ARCH_HAS_PMEM_API		if X86_64
>  	select ARCH_HAS_REFCOUNT
> @@ -692,6 +691,7 @@ config X86_SUPPORTS_MEMORY_FAILURE
>  config STA2X11
>  	bool "STA2X11 Companion Chip Support"
>  	depends on X86_32_NON_STANDARD && PCI
> +	select ARCH_HAS_PHYS_TO_DMA
>  	select X86_DEV_DMA_OPS
>  	select X86_DMA_REMAP
>  	select SWIOTLB
> diff --git a/arch/x86/include/asm/dma-direct.h b/arch/x86/include/asm/dma-direct.h
> index 1295bc622ebe..1a19251eaac9 100644
> --- a/arch/x86/include/asm/dma-direct.h
> +++ b/arch/x86/include/asm/dma-direct.h
> @@ -2,29 +2,8 @@
>  #ifndef ASM_X86_DMA_DIRECT_H
>  #define ASM_X86_DMA_DIRECT_H 1
>  
> -#include <linux/mem_encrypt.h>
> -
> -#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
>  bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
> -dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
> -phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
> -#else
> -static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
> -{
> -	if (!dev->dma_mask)
> -		return 0;
> -
> -	return addr + size - 1 <= *dev->dma_mask;
> -}
> -
> -static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
> -{
> -	return __sme_set(paddr);
> -}
> +dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
> +phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
>  
> -static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
> -{
> -	return __sme_clr(daddr);
> -}
> -#endif /* CONFIG_X86_DMA_REMAP */
>  #endif /* ASM_X86_DMA_DIRECT_H */
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index d3b80d5f9828..1a05bea831a8 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -216,7 +216,7 @@ static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
>  		 * Since we will be clearing the encryption bit, check the
>  		 * mask with it already cleared.
>  		 */
> -		addr = __sme_clr(phys_to_dma(dev, page_to_phys(page)));
> +		addr = __phys_to_dma(dev, page_to_phys(page));
>  		if ((addr + size) > dev->coherent_dma_mask) {
>  			__free_pages(page, get_order(size));
>  		} else {
> diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
> index eac58e03f43c..7a5bafb76d77 100644
> --- a/arch/x86/pci/sta2x11-fixup.c
> +++ b/arch/x86/pci/sta2x11-fixup.c
> @@ -207,11 +207,11 @@ bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
>  }
>  
>  /**
> - * phys_to_dma - Return the DMA AMBA address used for this STA2x11 device
> + * __phys_to_dma - Return the DMA AMBA address used for this STA2x11 device
>   * @dev: device for a PCI device
>   * @paddr: Physical address
>   */
> -dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
> +dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
>  {
>  	if (!dev->archdata.is_sta2x11)
>  		return paddr;
> @@ -223,7 +223,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
>   * @dev: device for a PCI device
>   * @daddr: STA2x11 AMBA DMA address
>   */
> -phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
> +phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
>  {
>  	if (!dev->archdata.is_sta2x11)
>  		return daddr;
> diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
> index bcdb1a3e4b1f..53ad6a47f513 100644
> --- a/include/linux/dma-direct.h
> +++ b/include/linux/dma-direct.h
> @@ -3,18 +3,19 @@
>  #define _LINUX_DMA_DIRECT_H 1
>  
>  #include <linux/dma-mapping.h>
> +#include <linux/mem_encrypt.h>
>  
>  #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
>  #include <asm/dma-direct.h>
>  #else
> -static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
> +static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
>  {
>  	dma_addr_t dev_addr = (dma_addr_t)paddr;
>  
>  	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
>  }
>  
> -static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
> +static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
>  {
>  	phys_addr_t paddr = (phys_addr_t)dev_addr;
>  
> @@ -30,6 +31,22 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
>  }
>  #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
>  
> +/*
> + * If memory encryption is supported, phys_to_dma will set the memory encryption
> + * bit in the DMA address, and dma_to_phys will clear it.  The raw __phys_to_dma
> + * and __dma_to_phys versions should only be used on non-encrypted memory for
> + * special occasions like DMA coherent buffers.
> + */
> +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
> +{
> +	return __sme_set(__phys_to_dma(dev, paddr));
> +}
> +
> +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
> +{
> +	return __sme_clr(__dma_to_phys(dev, daddr));
> +}
> +
>  #ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
>  void dma_mark_clean(void *addr, size_t size);
>  #else
> diff --git a/lib/swiotlb.c b/lib/swiotlb.c
> index 005d1d87bb2e..8b06b4485e65 100644
> --- a/lib/swiotlb.c
> +++ b/lib/swiotlb.c
> @@ -157,13 +157,6 @@ unsigned long swiotlb_size_or_default(void)
>  	return size ? size : (IO_TLB_DEFAULT_SIZE);
>  }
>  
> -/* For swiotlb, clear memory encryption mask from dma addresses */
> -static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
> -				      phys_addr_t address)
> -{
> -	return __sme_clr(phys_to_dma(hwdev, address));
> -}
> -
>  /* Note that this doesn't work with highmem page */
>  static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
>  				      volatile void *address)
> @@ -622,7 +615,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
>  		return SWIOTLB_MAP_ERROR;
>  	}
>  
> -	start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
> +	start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
>  	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
>  				      dir, attrs);
>  }
> @@ -726,12 +719,12 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
>  		goto out_warn;
>  
>  	phys_addr = swiotlb_tbl_map_single(dev,
> -			swiotlb_phys_to_dma(dev, io_tlb_start),
> +			__phys_to_dma(dev, io_tlb_start),
>  			0, size, DMA_FROM_DEVICE, 0);
>  	if (phys_addr == SWIOTLB_MAP_ERROR)
>  		goto out_warn;
>  
> -	*dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
> +	*dma_handle = __phys_to_dma(dev, phys_addr);
>  	if (dma_coherent_ok(dev, *dma_handle, size))
>  		goto out_unmap;
>  
> @@ -867,10 +860,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
>  	map = map_single(dev, phys, size, dir, attrs);
>  	if (map == SWIOTLB_MAP_ERROR) {
>  		swiotlb_full(dev, size, dir, 1);
> -		return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
> +		return __phys_to_dma(dev, io_tlb_overflow_buffer);
>  	}
>  
> -	dev_addr = swiotlb_phys_to_dma(dev, map);
> +	dev_addr = __phys_to_dma(dev, map);
>  
>  	/* Ensure that the address returned is DMA'ble */
>  	if (dma_capable(dev, dev_addr, size))
> @@ -879,7 +872,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
>  	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
>  	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
>  
> -	return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
> +	return __phys_to_dma(dev, io_tlb_overflow_buffer);
>  }
>  
>  /*
> @@ -1009,7 +1002,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
>  				sg_dma_len(sgl) = 0;
>  				return 0;
>  			}
> -			sg->dma_address = swiotlb_phys_to_dma(hwdev, map);
> +			sg->dma_address = __phys_to_dma(hwdev, map);
>  		} else
>  			sg->dma_address = dev_addr;
>  		sg_dma_len(sg) = sg->length;
> @@ -1073,7 +1066,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
>  int
>  swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
>  {
> -	return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
> +	return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
>  }
>  
>  /*
> @@ -1085,7 +1078,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
>  int
>  swiotlb_dma_supported(struct device *hwdev, u64 mask)
>  {
> -	return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
> +	return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
>  }
>  
>  #ifdef CONFIG_DMA_DIRECT_OPS
> 
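
To summarize the arch-facing side of the change (a sketch assuming a simple
constant bus offset, not code taken from the patch): an architecture that
selects ARCH_HAS_PHYS_TO_DMA now only provides the raw __-prefixed helpers,
and include/linux/dma-direct.h layers the encryption mask on top.

/* hypothetical <asm/dma-direct.h> for an arch with a fixed bus offset */
#define EXAMPLE_BUS_OFFSET	0x80000000UL	/* made-up value for illustration */

static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + EXAMPLE_BUS_OFFSET;
}

static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - EXAMPLE_BUS_OFFSET;
}

/*
 * The generic wrappers added by this patch then reduce to:
 *	phys_to_dma(dev, p) == __sme_set(__phys_to_dma(dev, p))
 *	dma_to_phys(dev, d) == __sme_clr(__dma_to_phys(dev, d))
 */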
