From: Robin Murphy <robin.murphy@arm.com>
To: Christoph Hellwig <hch@lst.de>, Will Deacon <will.deacon@arm.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: linux-arm-kernel@lists.infradead.org,
	iommu@lists.linux-foundation.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH 08/10] swiotlb: don't dip into swiotlb pool for coherent allocations
Date: Fri, 19 Oct 2018 17:45:09 +0100
Message-ID: <79603703-c93c-aa14-dc73-bbf6cd49499c@arm.com>
In-Reply-To: <20181008080246.20543-9-hch@lst.de>

On 08/10/2018 09:02, Christoph Hellwig wrote:
> All architectures that support swiotlb also have a memory zone that backs
> these less-than-full-addressing allocations (usually ZONE_DMA32).
> 
> Because of that it is rather pointless to fall back to the global swiotlb
> buffer if the normal dma-direct allocation fails - the only thing this
> does is eat up bounce buffers that would be more useful for serving
> streaming mappings.
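
Right - and for anyone wondering whether anything is lost here: the direct
allocator already copes with a limited coherent mask by retrying the page
allocation from a more restrictive zone, so there is no need to burn bounce
buffer slots for coherent allocations. Conceptually it looks something like
the below (a simplified sketch with a made-up helper name, ignoring the
phys-to-DMA offset - not the actual dma_direct_alloc_pages() code):

	/* Sketch: allocate pages that fit within dev->coherent_dma_mask. */
	static struct page *alloc_within_coherent_mask(struct device *dev,
			size_t size, gfp_t gfp)
	{
		unsigned int order = get_order(size);
		struct page *page;

		/* If the mask is known to be narrow, start in ZONE_DMA32. */
		if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
			gfp |= GFP_DMA32;
	again:
		page = alloc_pages(gfp, order);
		if (page && page_to_phys(page) + size - 1 > dev->coherent_dma_mask) {
			/* Landed above the mask: free and retry from a lower zone. */
			__free_pages(page, order);
			page = NULL;
			if ((gfp & GFP_DMA32) && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}
		}
		return page;
	}

Since every swiotlb-capable architecture provides such a zone, the retry
above is the useful fallback, not the bounce buffer pool.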

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   arch/arm64/mm/dma-mapping.c |   6 +--
>   include/linux/swiotlb.h     |   5 --
>   kernel/dma/swiotlb.c        | 105 +-----------------------------------
>   3 files changed, 5 insertions(+), 111 deletions(-)
> 
> diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
> index 8d91b927e09e..eee6cfcfde9e 100644
> --- a/arch/arm64/mm/dma-mapping.c
> +++ b/arch/arm64/mm/dma-mapping.c
> @@ -112,7 +112,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
>   		return addr;
>   	}
>   
> -	ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
> +	ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
>   	if (!ptr)
>   		goto no_mem;
>   
> @@ -133,7 +133,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
>   	return coherent_ptr;
>   
>   no_map:
> -	swiotlb_free(dev, size, ptr, *dma_handle, attrs);
> +	dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs);
>   no_mem:
>   	return NULL;
>   }
> @@ -151,7 +151,7 @@ static void __dma_free(struct device *dev, size_t size,
>   			return;
>   		vunmap(vaddr);
>   	}
> -	swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
> +	dma_direct_free_pages(dev, size, swiotlb_addr, dma_handle, attrs);
>   }
>   
>   static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
> diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
> index f847c1b265c4..a387b59640a4 100644
> --- a/include/linux/swiotlb.h
> +++ b/include/linux/swiotlb.h
> @@ -67,11 +67,6 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
>   
>   /* Accessory functions. */
>   
> -void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
> -		gfp_t flags, unsigned long attrs);
> -void swiotlb_free(struct device *dev, size_t size, void *vaddr,
> -		dma_addr_t dma_addr, unsigned long attrs);
> -
>   extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
>   				   unsigned long offset, size_t size,
>   				   enum dma_data_direction dir,
> diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
> index 4d7a4d85d71e..475a41eff3dc 100644
> --- a/kernel/dma/swiotlb.c
> +++ b/kernel/dma/swiotlb.c
> @@ -622,78 +622,6 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
>   	}
>   }
>   
> -static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
> -		size_t size)
> -{
> -	u64 mask = DMA_BIT_MASK(32);
> -
> -	if (dev && dev->coherent_dma_mask)
> -		mask = dev->coherent_dma_mask;
> -	return addr + size - 1 <= mask;
> -}
> -
> -static void *
> -swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
> -		unsigned long attrs)
> -{
> -	phys_addr_t phys_addr;
> -
> -	if (swiotlb_force == SWIOTLB_NO_FORCE)
> -		goto out_warn;
> -
> -	phys_addr = swiotlb_tbl_map_single(dev,
> -			__phys_to_dma(dev, io_tlb_start),
> -			0, size, DMA_FROM_DEVICE, attrs);
> -	if (phys_addr == SWIOTLB_MAP_ERROR)
> -		goto out_warn;
> -
> -	*dma_handle = __phys_to_dma(dev, phys_addr);
> -	if (!dma_coherent_ok(dev, *dma_handle, size))
> -		goto out_unmap;
> -
> -	memset(phys_to_virt(phys_addr), 0, size);
> -	return phys_to_virt(phys_addr);
> -
> -out_unmap:
> -	dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
> -		(unsigned long long)dev->coherent_dma_mask,
> -		(unsigned long long)*dma_handle);
> -
> -	/*
> -	 * DMA_TO_DEVICE to avoid memcpy in unmap_single.
> -	 * DMA_ATTR_SKIP_CPU_SYNC is optional.
> -	 */
> -	swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
> -			DMA_ATTR_SKIP_CPU_SYNC);
> -out_warn:
> -	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
> -		dev_warn(dev,
> -			"swiotlb: coherent allocation failed, size=%zu\n",
> -			size);
> -		dump_stack();
> -	}
> -	return NULL;
> -}
> -
> -static bool swiotlb_free_buffer(struct device *dev, size_t size,
> -		dma_addr_t dma_addr)
> -{
> -	phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
> -
> -	WARN_ON_ONCE(irqs_disabled());
> -
> -	if (!is_swiotlb_buffer(phys_addr))
> -		return false;
> -
> -	/*
> -	 * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
> -	 * DMA_ATTR_SKIP_CPU_SYNC is optional.
> -	 */
> -	swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
> -				 DMA_ATTR_SKIP_CPU_SYNC);
> -	return true;
> -}
> -
>   static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
>   		size_t size, enum dma_data_direction dir, unsigned long attrs)
>   {
> @@ -928,39 +856,10 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
>   	return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
>   }
>   
> -void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
> -		gfp_t gfp, unsigned long attrs)
> -{
> -	void *vaddr;
> -
> -	/* temporary workaround: */
> -	if (gfp & __GFP_NOWARN)
> -		attrs |= DMA_ATTR_NO_WARN;
> -
> -	/*
> -	 * Don't print a warning when the first allocation attempt fails.
> -	 * swiotlb_alloc_coherent() will print a warning when the DMA memory
> -	 * allocation ultimately failed.
> -	 */
> -	gfp |= __GFP_NOWARN;
> -
> -	vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
> -	if (!vaddr)
> -		vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
> -	return vaddr;
> -}
> -
> -void swiotlb_free(struct device *dev, size_t size, void *vaddr,
> -		dma_addr_t dma_addr, unsigned long attrs)
> -{
> -	if (!swiotlb_free_buffer(dev, size, dma_addr))
> -		dma_direct_free(dev, size, vaddr, dma_addr, attrs);
> -}
> -
>   const struct dma_map_ops swiotlb_dma_ops = {
>   	.mapping_error		= dma_direct_mapping_error,
> -	.alloc			= swiotlb_alloc,
> -	.free			= swiotlb_free,
> +	.alloc			= dma_direct_alloc,
> +	.free			= dma_direct_free,
>   	.sync_single_for_cpu	= swiotlb_sync_single_for_cpu,
>   	.sync_single_for_device	= swiotlb_sync_single_for_device,
>   	.sync_sg_for_cpu	= swiotlb_sync_sg_for_cpu,
> 

Thread overview: 57+ messages
2018-10-08  8:02 move swiotlb noncoherent dma support from arm64 to generic code V2 Christoph Hellwig
2018-10-08  8:02 ` [PATCH 01/10] swiotlb: remove a pointless comment Christoph Hellwig
2018-10-11 17:49   ` Robin Murphy
2018-10-19  0:09   ` Konrad Rzeszutek Wilk
2018-10-08  8:02 ` [PATCH 02/10] swiotlb: mark is_swiotlb_buffer static Christoph Hellwig
2018-10-11 17:54   ` Robin Murphy
2018-10-19  0:12   ` Konrad Rzeszutek Wilk
2018-10-08  8:02 ` [PATCH 03/10] swiotlb: do not panic on mapping failures Christoph Hellwig
2018-10-11 18:06   ` Robin Murphy
2018-10-19  0:18     ` Konrad Rzeszutek Wilk
2018-10-19  0:17   ` Konrad Rzeszutek Wilk
2018-10-19  6:04     ` Christoph Hellwig
2018-10-19 13:45       ` Konrad Rzeszutek Wilk
2018-10-08  8:02 ` [PATCH 04/10] swiotlb: remove the overflow buffer Christoph Hellwig
2018-10-11 18:19   ` Robin Murphy
2018-10-12 17:04   ` Catalin Marinas
2018-10-19  0:23   ` Konrad Rzeszutek Wilk
2018-10-08  8:02 ` [PATCH 05/10] swiotlb: merge swiotlb_unmap_page and unmap_single Christoph Hellwig
2018-10-18 17:44   ` Robin Murphy
2018-10-19  0:25   ` Konrad Rzeszutek Wilk
2018-10-08  8:02 ` [PATCH 06/10] swiotlb: use swiotlb_map_page in swiotlb_map_sg_attrs Christoph Hellwig
2018-10-18 17:53   ` Robin Murphy
2018-10-19  0:33   ` Konrad Rzeszutek Wilk
2018-11-07  1:27   ` John Stultz
2018-11-09  7:49     ` Christoph Hellwig
2018-11-09 16:37       ` Robin Murphy
2018-11-19 19:36         ` Robin Murphy
2018-11-20  9:22           ` Christoph Hellwig
2018-11-13  0:07       ` John Stultz
2018-11-13  0:26         ` John Stultz
2018-11-14 14:13         ` Christoph Hellwig
2018-11-14 16:12           ` Christoph Hellwig
2018-11-19 23:22             ` John Stultz
2018-11-20  9:25               ` Christoph Hellwig
2018-11-23 18:27                 ` Will Deacon
2018-11-23 19:34                   ` Robin Murphy
2018-11-26 19:31                     ` Will Deacon
2018-10-08  8:02 ` [PATCH 07/10] swiotlb: refactor swiotlb_map_page Christoph Hellwig
2018-10-18 18:09   ` Robin Murphy
2018-10-19  0:37     ` Konrad Rzeszutek Wilk
2018-10-19  6:52       ` Christoph Hellwig
2018-10-19 13:46         ` Konrad Rzeszutek Wilk
2018-10-08  8:02 ` [PATCH 08/10] swiotlb: don't dip into swiotlb pool for coherent allocations Christoph Hellwig
2018-10-12 17:04   ` Catalin Marinas
2018-10-19  0:40   ` Konrad Rzeszutek Wilk
2018-10-19 16:45   ` Robin Murphy [this message]
2018-10-08  8:02 ` [PATCH 09/10] swiotlb: add support for non-coherent DMA Christoph Hellwig
2018-10-19  0:49   ` Konrad Rzeszutek Wilk
2018-10-22 17:11   ` Robin Murphy
2018-10-26  8:04     ` Christoph Hellwig
2018-10-26  9:59       ` Robin Murphy
2018-10-08  8:02 ` [PATCH 10/10] arm64: use the generic swiotlb_dma_ops Christoph Hellwig
2018-10-12 13:01   ` Robin Murphy
2018-10-12 14:40     ` Christoph Hellwig
2018-10-12 17:05       ` Catalin Marinas
2018-10-22 17:52   ` Robin Murphy
2018-10-26 12:44     ` Christoph Hellwig
