From: Alexey Kardashevskiy <aik@ozlabs.ru>
To: Christoph Hellwig <hch@lst.de>, iommu@lists.linux-foundation.org
Cc: linux-arch@vger.kernel.org, Sekhar Nori <nsekhar@ti.com>,
	Russell King <linux@armlinux.org.uk>,
	linux-kernel@vger.kernel.org, Robin Murphy <robin.murphy@arm.com>,
	linux-arm-kernel@lists.infradead.org,
	Michael Ellerman <mpe@ellerman.id.au>
Subject: Re: [PATCH 8/9] dma-mapping: move large parts of <linux/dma-direct.h> to kernel/dma
Date: Mon, 19 Oct 2020 13:25:02 +1100	[thread overview]
Message-ID: <afaf49d9-5465-4b1a-dac1-91688ba4abbf@ozlabs.ru> (raw)
In-Reply-To: <20200930085548.920261-9-hch@lst.de>



On 30/09/2020 18:55, Christoph Hellwig wrote:
> Most of the dma_direct symbols should only be used by direct.c and
> mapping.c, so move them to kernel/dma.  In fact more of dma-direct.h
> should eventually move, but that will require more coordination with
> other subsystems.

Because of this change,
http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20200713062348.100552-1-aik@ozlabs.ru/
no longer works.

Should I send a patch moving dma_direct_map_sg/dma_direct_map_page and
their unmap counterparts back to include/, or is there a better idea?
Thanks,
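
For reference, the sort of arch-side caller that breaks looks roughly
like the sketch below. This is an illustration only: the helper name and
the bypass policy are made up, and only dma_direct_map_page() and its
arguments come from the code being moved by this patch.

/*
 * Hypothetical arch-side mapping helper that bypasses the IOMMU and
 * maps through dma-direct when the device is capable enough.  It needs
 * the dma_direct_map_page() declaration outside kernel/dma/, which this
 * patch removes from <linux/dma-direct.h>.
 */
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>

static dma_addr_t example_iommu_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	/*
	 * Placeholder policy: bypass the IOMMU for fully 64bit-capable
	 * devices and map the page directly.
	 */
	if (dma_get_mask(dev) == DMA_BIT_MASK(64))
		return dma_direct_map_page(dev, page, offset, size,
					   dir, attrs);

	/* The real code would fall back to the arch IOMMU table here. */
	return DMA_MAPPING_ERROR;
}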


> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   include/linux/dma-direct.h | 106 ---------------------------------
>   kernel/dma/direct.c        |   2 +-
>   kernel/dma/direct.h        | 119 +++++++++++++++++++++++++++++++++++++
>   kernel/dma/mapping.c       |   2 +-
>   4 files changed, 121 insertions(+), 108 deletions(-)
>   create mode 100644 kernel/dma/direct.h
> 
> diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
> index 38ed3b55034d50..a2d6640c42c04e 100644
> --- a/include/linux/dma-direct.h
> +++ b/include/linux/dma-direct.h
> @@ -120,114 +120,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
>   void dma_direct_free_pages(struct device *dev, size_t size,
>   		struct page *page, dma_addr_t dma_addr,
>   		enum dma_data_direction dir);
> -int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
> -		void *cpu_addr, dma_addr_t dma_addr, size_t size,
> -		unsigned long attrs);
> -bool dma_direct_can_mmap(struct device *dev);
> -int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
> -		void *cpu_addr, dma_addr_t dma_addr, size_t size,
> -		unsigned long attrs);
>   int dma_direct_supported(struct device *dev, u64 mask);
> -bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
> -int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
> -		enum dma_data_direction dir, unsigned long attrs);
>   dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
>   		size_t size, enum dma_data_direction dir, unsigned long attrs);
> -size_t dma_direct_max_mapping_size(struct device *dev);
>   
> -#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
> -    defined(CONFIG_SWIOTLB)
> -void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
> -		int nents, enum dma_data_direction dir);
> -#else
> -static inline void dma_direct_sync_sg_for_device(struct device *dev,
> -		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
> -{
> -}
> -#endif
> -
> -#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
> -    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
> -    defined(CONFIG_SWIOTLB)
> -void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
> -		int nents, enum dma_data_direction dir, unsigned long attrs);
> -void dma_direct_sync_sg_for_cpu(struct device *dev,
> -		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
> -#else
> -static inline void dma_direct_unmap_sg(struct device *dev,
> -		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
> -		unsigned long attrs)
> -{
> -}
> -static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
> -		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
> -{
> -}
> -#endif
> -
> -static inline void dma_direct_sync_single_for_device(struct device *dev,
> -		dma_addr_t addr, size_t size, enum dma_data_direction dir)
> -{
> -	phys_addr_t paddr = dma_to_phys(dev, addr);
> -
> -	if (unlikely(is_swiotlb_buffer(paddr)))
> -		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
> -
> -	if (!dev_is_dma_coherent(dev))
> -		arch_sync_dma_for_device(paddr, size, dir);
> -}
> -
> -static inline void dma_direct_sync_single_for_cpu(struct device *dev,
> -		dma_addr_t addr, size_t size, enum dma_data_direction dir)
> -{
> -	phys_addr_t paddr = dma_to_phys(dev, addr);
> -
> -	if (!dev_is_dma_coherent(dev)) {
> -		arch_sync_dma_for_cpu(paddr, size, dir);
> -		arch_sync_dma_for_cpu_all();
> -	}
> -
> -	if (unlikely(is_swiotlb_buffer(paddr)))
> -		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
> -
> -	if (dir == DMA_FROM_DEVICE)
> -		arch_dma_mark_clean(paddr, size);
> -}
> -
> -static inline dma_addr_t dma_direct_map_page(struct device *dev,
> -		struct page *page, unsigned long offset, size_t size,
> -		enum dma_data_direction dir, unsigned long attrs)
> -{
> -	phys_addr_t phys = page_to_phys(page) + offset;
> -	dma_addr_t dma_addr = phys_to_dma(dev, phys);
> -
> -	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
> -		return swiotlb_map(dev, phys, size, dir, attrs);
> -
> -	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
> -		if (swiotlb_force != SWIOTLB_NO_FORCE)
> -			return swiotlb_map(dev, phys, size, dir, attrs);
> -
> -		dev_WARN_ONCE(dev, 1,
> -			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
> -			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
> -		return DMA_MAPPING_ERROR;
> -	}
> -
> -	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> -		arch_sync_dma_for_device(phys, size, dir);
> -	return dma_addr;
> -}
> -
> -static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
> -		size_t size, enum dma_data_direction dir, unsigned long attrs)
> -{
> -	phys_addr_t phys = dma_to_phys(dev, addr);
> -
> -	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> -		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
> -
> -	if (unlikely(is_swiotlb_buffer(phys)))
> -		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
> -}
>   #endif /* _LINUX_DMA_DIRECT_H */
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 87697c86f0b82a..bf9f77623022bb 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -7,13 +7,13 @@
>   #include <linux/memblock.h> /* for max_pfn */
>   #include <linux/export.h>
>   #include <linux/mm.h>
> -#include <linux/dma-direct.h>
>   #include <linux/dma-map-ops.h>
>   #include <linux/scatterlist.h>
>   #include <linux/pfn.h>
>   #include <linux/vmalloc.h>
>   #include <linux/set_memory.h>
>   #include <linux/slab.h>
> +#include "direct.h"
>   
>   /*
>    * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use it
> diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
> new file mode 100644
> index 00000000000000..b9861557873768
> --- /dev/null
> +++ b/kernel/dma/direct.h
> @@ -0,0 +1,119 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (C) 2018 Christoph Hellwig.
> + *
> + * DMA operations that map physical memory directly without using an IOMMU.
> + */
> +#ifndef _KERNEL_DMA_DIRECT_H
> +#define _KERNEL_DMA_DIRECT_H
> +
> +#include <linux/dma-direct.h>
> +
> +int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
> +		void *cpu_addr, dma_addr_t dma_addr, size_t size,
> +		unsigned long attrs);
> +bool dma_direct_can_mmap(struct device *dev);
> +int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
> +		void *cpu_addr, dma_addr_t dma_addr, size_t size,
> +		unsigned long attrs);
> +bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
> +int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
> +		enum dma_data_direction dir, unsigned long attrs);
> +size_t dma_direct_max_mapping_size(struct device *dev);
> +
> +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
> +    defined(CONFIG_SWIOTLB)
> +void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
> +		int nents, enum dma_data_direction dir);
> +#else
> +static inline void dma_direct_sync_sg_for_device(struct device *dev,
> +		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
> +{
> +}
> +#endif
> +
> +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
> +    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
> +    defined(CONFIG_SWIOTLB)
> +void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
> +		int nents, enum dma_data_direction dir, unsigned long attrs);
> +void dma_direct_sync_sg_for_cpu(struct device *dev,
> +		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
> +#else
> +static inline void dma_direct_unmap_sg(struct device *dev,
> +		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
> +		unsigned long attrs)
> +{
> +}
> +static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
> +		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
> +{
> +}
> +#endif
> +
> +static inline void dma_direct_sync_single_for_device(struct device *dev,
> +		dma_addr_t addr, size_t size, enum dma_data_direction dir)
> +{
> +	phys_addr_t paddr = dma_to_phys(dev, addr);
> +
> +	if (unlikely(is_swiotlb_buffer(paddr)))
> +		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
> +
> +	if (!dev_is_dma_coherent(dev))
> +		arch_sync_dma_for_device(paddr, size, dir);
> +}
> +
> +static inline void dma_direct_sync_single_for_cpu(struct device *dev,
> +		dma_addr_t addr, size_t size, enum dma_data_direction dir)
> +{
> +	phys_addr_t paddr = dma_to_phys(dev, addr);
> +
> +	if (!dev_is_dma_coherent(dev)) {
> +		arch_sync_dma_for_cpu(paddr, size, dir);
> +		arch_sync_dma_for_cpu_all();
> +	}
> +
> +	if (unlikely(is_swiotlb_buffer(paddr)))
> +		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
> +
> +	if (dir == DMA_FROM_DEVICE)
> +		arch_dma_mark_clean(paddr, size);
> +}
> +
> +static inline dma_addr_t dma_direct_map_page(struct device *dev,
> +		struct page *page, unsigned long offset, size_t size,
> +		enum dma_data_direction dir, unsigned long attrs)
> +{
> +	phys_addr_t phys = page_to_phys(page) + offset;
> +	dma_addr_t dma_addr = phys_to_dma(dev, phys);
> +
> +	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
> +		return swiotlb_map(dev, phys, size, dir, attrs);
> +
> +	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
> +		if (swiotlb_force != SWIOTLB_NO_FORCE)
> +			return swiotlb_map(dev, phys, size, dir, attrs);
> +
> +		dev_WARN_ONCE(dev, 1,
> +			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
> +			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
> +		return DMA_MAPPING_ERROR;
> +	}
> +
> +	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> +		arch_sync_dma_for_device(phys, size, dir);
> +	return dma_addr;
> +}
> +
> +static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
> +		size_t size, enum dma_data_direction dir, unsigned long attrs)
> +{
> +	phys_addr_t phys = dma_to_phys(dev, addr);
> +
> +	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> +		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
> +
> +	if (unlikely(is_swiotlb_buffer(phys)))
> +		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
> +}
> +#endif /* _KERNEL_DMA_DIRECT_H */
> diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
> index 335ba183e0956a..51bb8fa8eb8948 100644
> --- a/kernel/dma/mapping.c
> +++ b/kernel/dma/mapping.c
> @@ -7,7 +7,6 @@
>    */
>   #include <linux/memblock.h> /* for max_pfn */
>   #include <linux/acpi.h>
> -#include <linux/dma-direct.h>
>   #include <linux/dma-map-ops.h>
>   #include <linux/export.h>
>   #include <linux/gfp.h>
> @@ -15,6 +14,7 @@
>   #include <linux/slab.h>
>   #include <linux/vmalloc.h>
>   #include "debug.h"
> +#include "direct.h"
>   
>   /*
>    * Managed DMA API
> 

-- 
Alexey

  reply	other threads:[~2020-10-19  2:25 UTC|newest]

Thread overview: 42+ messages in thread
2020-09-30  8:55 clean up the DMA mapping headers Christoph Hellwig
2020-09-30  8:55 ` [PATCH 1/9] dma-mapping: split <linux/dma-mapping.h> Christoph Hellwig
2020-10-11 14:33   ` Guenter Roeck
2020-10-13 11:29     ` Christoph Hellwig
2020-09-30  8:55 ` [PATCH 2/9] dma-contiguous: remove dma_declare_contiguous Christoph Hellwig
2020-09-30  8:55 ` [PATCH 3/9] dma-contiguous: remove dev_set_cma_area Christoph Hellwig
2020-09-30  8:55 ` [PATCH 4/9] dma-contiguous: remove dma_contiguous_set_default Christoph Hellwig
2020-09-30  8:55 ` [PATCH 5/9] dma-mapping: merge <linux/dma-contiguous.h> into <linux/dma-map-ops.h> Christoph Hellwig
2020-09-30  8:55 ` [PATCH 6/9] dma-mapping: remove <asm/dma-contiguous.h> Christoph Hellwig
2020-09-30  8:55 ` [PATCH 7/9] dma-mapping: move dma-debug.h to kernel/dma/ Christoph Hellwig
2020-09-30  8:55 ` [PATCH 8/9] dma-mapping: move large parts of <linux/dma-direct.h> to kernel/dma Christoph Hellwig
2020-10-19  2:25   ` Alexey Kardashevskiy [this message]
2020-09-30  8:55 ` [PATCH 9/9] dma-mapping: merge <linux/dma-noncoherent.h> into <linux/dma-map-ops.h> Christoph Hellwig
2020-10-05 17:05 ` clean up the DMA mapping headers Christoph Hellwig
