From: Hillf Danton <hdanton@sina.com>
To: Christoph Hellwig <hch@lst.de>,
	Tobias Klausmann <tobias.johannes.klausmann@mni.thm.de>
Cc: netdev@vger.kernel.org, linux-wireless@vger.kernel.org,
	linux-kernel@vger.kernel.org, ath10k@lists.infradead.org,
	Nicolin Chen <nicoleotsuka@gmail.com>,
	iommu@lists.linux-foundation.org, tobias.klausmann@freenet.de,
	robin.murphy@arm.com, davem@davemloft.net, kvalo@codeaurora.org
Subject: Re: regression in ath10k dma allocation
Date: Tue, 20 Aug 2019 14:58:33 +0800	[thread overview]
Message-ID: <20190820065833.1628-1-hdanton@sina.com> (raw)
In-Reply-To: <acd7a4b0-fde8-1aa2-af07-2b469e5d5ca7@mni.thm.de>


On Tue, 20 Aug 2019 05:05:14 +0200 Christoph Hellwig wrote:
> 
> Tobias, please try this patch:
> 
Wait a minute!

> --
> From 88c590a2ecafc8279388f25bfbe1ead8ea3507a6 Mon Sep 17 00:00:00 2001
> From: Christoph Hellwig <hch@lst.de>
> Date: Tue, 20 Aug 2019 11:45:49 +0900
> Subject: dma-direct: fix zone selection after an unaddressable CMA allocation
> 
> The new dma_alloc_contiguous hides if we allocate CMA or regular
> pages, and thus fails to retry a ZONE_NORMAL allocation if the CMA
> allocation succeeds but isn't addressable.  That means we either fail
> outright or dip into a small zone that might not succeed either.
> 
> Thanks to Hillf Danton for debugging this issue.
> 
> Fixes: b1d2dc009dec ("dma-contiguous: add dma_{alloc,free}_contiguous() helpers")
> Reported-by: Tobias Klausmann <tobias.johannes.klausmann@mni.thm.de>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/iommu/dma-iommu.c      | 3 +++
>  include/linux/dma-contiguous.h | 5 +----
>  kernel/dma/contiguous.c        | 9 +++------
>  kernel/dma/direct.c            | 7 ++++++-
>  4 files changed, 13 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index d991d40f797f..f68a62c3c32b 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -965,10 +965,13 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
>  {
>  	bool coherent = dev_is_dma_coherent(dev);
>  	size_t alloc_size = PAGE_ALIGN(size);
> +	int node = dev_to_node(dev);
>  	struct page *page = NULL;
>  	void *cpu_addr;
>  
>  	page = dma_alloc_contiguous(dev, alloc_size, gfp);
> +	if (!page)
> +		page = alloc_pages_node(node, gfp, get_order(alloc_size));
>  	if (!page)
>  		return NULL;
>  
> diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
> index c05d4e661489..03f8e98e3bcc 100644
> --- a/include/linux/dma-contiguous.h
> +++ b/include/linux/dma-contiguous.h
> @@ -160,10 +160,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
>  static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
>  		gfp_t gfp)
>  {
> -	int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
> -	size_t align = get_order(PAGE_ALIGN(size));
> -
> -	return alloc_pages_node(node, gfp, align);
> +	return NULL;
>  }
>  
>  static inline void dma_free_contiguous(struct device *dev, struct page *page,
> diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
> index 2bd410f934b3..e6b450fdbeb6 100644
> --- a/kernel/dma/contiguous.c
> +++ b/kernel/dma/contiguous.c
> @@ -230,9 +230,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
>   */
>  struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
>  {
> -	int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
> -	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> -	size_t align = get_order(PAGE_ALIGN(size));
> +	size_t count = size >> PAGE_SHIFT;
>  	struct page *page = NULL;
>  	struct cma *cma = NULL;
>  
> @@ -243,14 +241,12 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
>  
>  	/* CMA can be used only in the context which permits sleeping */
>  	if (cma && gfpflags_allow_blocking(gfp)) {
> +		size_t align = get_order(size);
>  		size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
>  
>  		page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
>  	}
>  
> -	/* Fallback allocation of normal pages */
> -	if (!page)
> -		page = alloc_pages_node(node, gfp, align);
>  	return page;
>  }
>  
> @@ -258,6 +254,7 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
>   * dma_free_contiguous() - release allocated pages
>   * @dev:   Pointer to device for which the pages were allocated.
>   * @page:  Pointer to the allocated pages.
> +	int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
>   * @size:  Size of allocated pages.
>   *
>   * This function releases memory allocated by dma_alloc_contiguous(). As the
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 795c9b095d75..d82d184463ce 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -85,6 +85,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
>  struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
>  		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
>  {
> +	size_t alloc_size = PAGE_ALIGN(size);
> +	int node = dev_to_node(dev);
>  	struct page *page = NULL;
>  	u64 phys_mask;
>  
> @@ -95,8 +97,11 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
>  	gfp &= ~__GFP_ZERO;
>  	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
>  			&phys_mask);
> +	page = dma_alloc_contiguous(dev, alloc_size, gfp);
> +	if (page && dma_coherent_ok(dev, page_to_phys(page), size))
> +		return page;

Here is my concern: if the CMA allocation succeeds but the page is not
addressable, the page is neither freed nor reused before the code falls
through to the retry below, so the CMA allocation is leaked.
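
A minimal way to plug that leak, as far as I can see (untested sketch; it
assumes dma_free_contiguous() can release a page obtained from cma_alloc()
as well as one from the page allocator), would be to give the unaddressable
page back before falling through to the retry:

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (page) {
		if (dma_coherent_ok(dev, page_to_phys(page), size))
			return page;
		/* CMA page the device cannot address: give it back */
		dma_free_contiguous(dev, page, size);
	}
again:
	page = alloc_pages_node(node, gfp, get_order(alloc_size));

That would leave the zone-limiting retry logic below untouched.
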
>  again:
> -	page = dma_alloc_contiguous(dev, size, gfp);
> +	page = alloc_pages_node(node, gfp, get_order(alloc_size));
>  	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
>  		dma_free_contiguous(dev, page, size);
>  		page = NULL;
> -- 
> 2.20.1


Thread overview: 39+ messages

2019-08-16 16:08 regression in ath10k dma allocation Tobias Klausmann
2019-08-16 16:43 ` Christoph Hellwig
2019-08-16 20:16   ` Tobias Klausmann
2019-08-16 22:25     ` Nicolin Chen
2019-08-16 22:42       ` Tobias Klausmann
2019-08-18  3:13       ` Hillf Danton
2019-08-18 22:38         ` Tobias Klausmann
2019-08-20  1:58           ` Nicolin Chen
2019-08-20  2:14             ` Christoph Hellwig
2019-08-20  3:05           ` Christoph Hellwig
2019-08-20  6:58           ` Hillf Danton [this message]
2019-08-20  7:12             ` Christoph Hellwig
2019-08-20 20:24               ` Tobias Klausmann
2019-08-20  2:29 Hillf Danton
