From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
To: Christoph Hellwig <hch@lst.de>, Paul Mackerras <paulus@samba.org>,
	Michael Ellerman <mpe@ellerman.id.au>,
	Tony Luck <tony.luck@intel.com>,
	Fenghua Yu <fenghua.yu@intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
	Robin Murphy <robin.murphy@arm.com>,
	linuxppc-dev@lists.ozlabs.org, iommu@lists.linux-foundation.org,
	linux-ia64@vger.kernel.org, Scott Wood <oss@buserror.net>
Subject: Re: [PATCH 16/20] powerpc/dma: use dma_direct_{alloc,free}
Date: Thu, 09 Aug 2018 10:52:56 +1000	[thread overview]
Message-ID: <4d5c934ea7bc9802eb3653e6afe7583cb05e4484.camel@kernel.crashing.org> (raw)
In-Reply-To: <20180730163824.10064-17-hch@lst.de>

On Mon, 2018-07-30 at 18:38 +0200, Christoph Hellwig wrote:
> These provide the same functionality as the existing helpers, but are
> simpler, and also allow the (optional) use of CMA.
> 
> Note that the swiotlb code now calls into the dma_direct code directly,
> given that it doesn't work with noncoherent caches at all, and isn't called
> when we have an iommu either, so the iommu special case in
> dma_nommu_alloc_coherent isn't required for swiotlb.

I am not convinced that this will produce the same results due to
the way the zone picking works.
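
To make that concrete, here is a quick userspace model of the two
strategies. None of this is kernel code: the zone boundaries, the
ZONE_DMA width and the device masks are invented for illustration,
and the dma_direct side is only an approximation of its GFP
selection:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Old powerpc path: walk the platform's real zone boundaries, top
 * down, and take the largest zone entirely below the limit (mirrors
 * the deleted dma_pfn_limit_to_zone() plus the switch below it). */
static const char *old_pick(uint64_t coherent_mask,
			    const uint64_t *max_zone_pfns)
{
	uint64_t pfn_limit = (coherent_mask >> PAGE_SHIFT) + 1;
	/* index 0 = ZONE_DMA, 1 = ZONE_DMA32, 2 = ZONE_NORMAL */
	const char *gfp[] = { "GFP_DMA", "GFP_DMA32", "no zone flag" };
	int i;

	for (i = 2; i >= 0; i--)
		if (max_zone_pfns[i] <= pfn_limit)
			return gfp[i];
	return "fail: no suitable zone";
}

/* dma_direct-style path (approximation): compare the mask against
 * fixed bit widths instead of the platform's zone boundaries. */
static const char *direct_pick(uint64_t coherent_mask, int zone_dma_bits)
{
	if (coherent_mask <= ((1ULL << zone_dma_bits) - 1))
		return "GFP_DMA";
	if (coherent_mask <= 0xffffffffULL)
		return "GFP_DMA32";
	return "no zone flag";
}

int main(void)
{
	/* invented layout: ZONE_DMA to 2 GiB, ZONE_DMA32 to 4 GiB,
	 * ZONE_NORMAL to 16 GiB */
	const uint64_t max_zone_pfns[] = {
		(2ULL << 30) >> PAGE_SHIFT,
		(4ULL << 30) >> PAGE_SHIFT,
		(16ULL << 30) >> PAGE_SHIFT,
	};
	const uint64_t masks[] = { 0x7fffffffULL, 0xffffffffULL, ~0ULL };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("mask %#018llx: old=%s direct=%s\n",
		       (unsigned long long)masks[i],
		       old_pick(masks[i], max_zone_pfns),
		       direct_pick(masks[i], 30));
	return 0;
}

With those example numbers a 31-bit device stays in GFP_DMA on the
old path, because the platform's actual ZONE_DMA boundary is what
gets consulted, while the fixed-width check sends it to GFP_DMA32.
That is the sort of divergence I'd like to rule out before we switch.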

As for the interaction with swiotlb, we'll need the FSL guys to have
a look. Scott, do you remember what this is about?
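
The bit I suspect matters is the max_direct_dma_addr clamp in the
deleted get_pfn_limit() below, which kept coherent allocations under
the swiotlb bounce threshold; as far as I can see dma_direct_alloc()
only looks at the mask. A quick userspace sketch of that clamp (page
size and addresses invented, not taken from any real board):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Model of what the deleted get_pfn_limit() did when swiotlb was in
 * use: clamp the allocation limit to the device's
 * max_direct_dma_addr so coherent buffers stay directly DMA-able. */
static uint64_t pfn_limit(uint64_t coherent_mask,
			  uint64_t max_direct_dma_addr)
{
	uint64_t pfn = (coherent_mask >> PAGE_SHIFT) + 1;

	if (max_direct_dma_addr &&
	    (max_direct_dma_addr >> PAGE_SHIFT) < pfn)
		pfn = max_direct_dma_addr >> PAGE_SHIFT;
	return pfn;
}

int main(void)
{
	/* invented case: 64-bit capable device, but only the first
	 * 2 GiB is directly DMA-able without bouncing */
	printf("old limit with clamp: %#llx pages\n",
	       (unsigned long long)pfn_limit(~0ULL, 2ULL << 30));
	printf("mask-only limit:      %#llx pages\n",
	       (unsigned long long)pfn_limit(~0ULL, 0));
	return 0;
}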

> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  arch/powerpc/include/asm/pgtable.h |  1 -
>  arch/powerpc/kernel/dma-swiotlb.c  |  4 +-
>  arch/powerpc/kernel/dma.c          | 78 ++++--------------------------
>  arch/powerpc/mm/mem.c              | 19 --------
>  4 files changed, 11 insertions(+), 91 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
> index 14c79a7dc855..123de4958d2e 100644
> --- a/arch/powerpc/include/asm/pgtable.h
> +++ b/arch/powerpc/include/asm/pgtable.h
> @@ -38,7 +38,6 @@ extern unsigned long empty_zero_page[];
>  extern pgd_t swapper_pg_dir[];
>  
>  void limit_zone_pfn(enum zone_type zone, unsigned long max_pfn);
> -int dma_pfn_limit_to_zone(u64 pfn_limit);
>  extern void paging_init(void);
>  
>  /*
> diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
> index f6e0701c5303..25986fcd1e5e 100644
> --- a/arch/powerpc/kernel/dma-swiotlb.c
> +++ b/arch/powerpc/kernel/dma-swiotlb.c
> @@ -46,8 +46,8 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
>   * for everything else.
>   */
>  const struct dma_map_ops powerpc_swiotlb_dma_ops = {
> -	.alloc = __dma_nommu_alloc_coherent,
> -	.free = __dma_nommu_free_coherent,
> +	.alloc = dma_direct_alloc,
> +	.free = dma_direct_free,
>  	.mmap = dma_nommu_mmap_coherent,
>  	.map_sg = swiotlb_map_sg_attrs,
>  	.unmap_sg = swiotlb_unmap_sg_attrs,
> diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
> index 2cfc45acbb52..2b90a403cdac 100644
> --- a/arch/powerpc/kernel/dma.c
> +++ b/arch/powerpc/kernel/dma.c
> @@ -26,75 +26,6 @@
>   * can set archdata.dma_data to an unsigned long holding the offset. By
>   * default the offset is PCI_DRAM_OFFSET.
>   */
> -
> -static u64 __maybe_unused get_pfn_limit(struct device *dev)
> -{
> -	u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
> -	struct dev_archdata __maybe_unused *sd = &dev->archdata;
> -
> -#ifdef CONFIG_SWIOTLB
> -	if (sd->max_direct_dma_addr && dev->dma_ops == &powerpc_swiotlb_dma_ops)
> -		pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
> -#endif
> -
> -	return pfn;
> -}
> -
> -#ifndef CONFIG_NOT_COHERENT_CACHE
> -void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
> -				  dma_addr_t *dma_handle, gfp_t flag,
> -				  unsigned long attrs)
> -{
> -	void *ret;
> -	struct page *page;
> -	int node = dev_to_node(dev);
> -#ifdef CONFIG_FSL_SOC
> -	u64 pfn = get_pfn_limit(dev);
> -	int zone;
> -
> -	/*
> -	 * This code should be OK on other platforms, but we have drivers that
> -	 * don't set coherent_dma_mask. As a workaround we just ifdef it. This
> -	 * whole routine needs some serious cleanup.
> -	 */
> -
> -	zone = dma_pfn_limit_to_zone(pfn);
> -	if (zone < 0) {
> -		dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
> -			__func__, pfn);
> -		return NULL;
> -	}
> -
> -	switch (zone) {
> -	case ZONE_DMA:
> -		flag |= GFP_DMA;
> -		break;
> -#ifdef CONFIG_ZONE_DMA32
> -	case ZONE_DMA32:
> -		flag |= GFP_DMA32;
> -		break;
> -#endif
> -	};
> -#endif /* CONFIG_FSL_SOC */
> -
> -	page = alloc_pages_node(node, flag, get_order(size));
> -	if (page == NULL)
> -		return NULL;
> -	ret = page_address(page);
> -	memset(ret, 0, size);
> -	*dma_handle = phys_to_dma(dev,__pa(ret));
> -
> -	return ret;
> -}
> -
> -void __dma_nommu_free_coherent(struct device *dev, size_t size,
> -				void *vaddr, dma_addr_t dma_handle,
> -				unsigned long attrs)
> -{
> -	free_pages((unsigned long)vaddr, get_order(size));
> -}
> -#endif /* !CONFIG_NOT_COHERENT_CACHE */
> -
>  static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
>  				       dma_addr_t *dma_handle, gfp_t flag,
>  				       unsigned long attrs)
> @@ -105,8 +36,12 @@ static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
>  	 * we can really use the direct ops
>  	 */
>  	if (dma_direct_supported(dev, dev->coherent_dma_mask))
> +#ifdef CONFIG_NOT_COHERENT_CACHE
>  		return __dma_nommu_alloc_coherent(dev, size, dma_handle,
>  						   flag, attrs);
> +#else
> +		return dma_direct_alloc(dev, size, dma_handle, flag, attrs);
> +#endif
>  
>  	/* Ok we can't ... do we have an iommu ? If not, fail */
>  	iommu = get_iommu_table_base(dev);
> @@ -127,8 +62,13 @@ static void dma_nommu_free_coherent(struct device *dev, size_t size,
>  
>  	/* See comments in dma_nommu_alloc_coherent() */
>  	if (dma_direct_supported(dev, dev->coherent_dma_mask))
> +#ifdef CONFIG_NOT_COHERENT_CACHE
>  		return __dma_nommu_free_coherent(dev, size, vaddr, dma_handle,
>  						  attrs);
> +#else
> +		return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
> +#endif
> +
>  	/* Maybe we used an iommu ... */
>  	iommu = get_iommu_table_base(dev);
>  
> diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
> index 5c8530d0c611..ec8ed9d7abef 100644
> --- a/arch/powerpc/mm/mem.c
> +++ b/arch/powerpc/mm/mem.c
> @@ -276,25 +276,6 @@ void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
>  	}
>  }
>  
> -/*
> - * Find the least restrictive zone that is entirely below the
> - * specified pfn limit.  Returns < 0 if no suitable zone is found.
> - *
> - * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
> - * systems -- the DMA limit can be higher than any possible real pfn.
> - */
> -int dma_pfn_limit_to_zone(u64 pfn_limit)
> -{
> -	int i;
> -
> -	for (i = TOP_ZONE; i >= 0; i--) {
> -		if (max_zone_pfns[i] <= pfn_limit)
> -			return i;
> -	}
> -
> -	return -EPERM;
> -}
> -
>  /*
>   * paging_init() sets up the page tables - in fact we've already done this.
>   */
