From: Chris Goldsworthy <cgoldswo@codeaurora.org>
To: John Stultz <john.stultz@linaro.org>
Cc: lkml <linux-kernel@vger.kernel.org>,
	"Sumit Semwal" <sumit.semwal@linaro.org>,
	"Liam Mark" <lmark@codeaurora.org>,
	"Laura Abbott" <labbott@kernel.org>,
	"Brian Starkey" <Brian.Starkey@arm.com>,
	"Hridya Valsaraju" <hridya@google.com>,
	"Suren Baghdasaryan" <surenb@google.com>,
	"Sandeep Patil" <sspatil@google.com>,
	"Ørjan Eide" <orjan.eide@arm.com>,
	"Robin Murphy" <robin.murphy@arm.com>,
	"Ezequiel Garcia" <ezequiel@collabora.com>,
	"Simon Ser" <contact@emersion.fr>,
	"James Jones" <jajones@nvidia.com>,
	linux-media@vger.kernel.org, dri-devel@lists.freedesktop.org
Subject: Re: [RFC][PATCH 5/6] dma-buf: system_heap: Add pagepool support to system heap
Date: Tue, 29 Sep 2020 21:46:22 -0700
Message-ID: <1e109a138c86be7b06e20cb30a243fc7@codeaurora.org>
In-Reply-To: <20200926042453.67517-6-john.stultz@linaro.org>

On 2020-09-25 21:24, John Stultz wrote:
> Reuse/abuse the pagepool code from the network code to speed
> up allocation performance.
> 
> This is similar to the ION pagepool usage, but tries to
> utilize generic code instead of a custom implementation.
> 
> Cc: Sumit Semwal <sumit.semwal@linaro.org>
> Cc: Liam Mark <lmark@codeaurora.org>
> Cc: Laura Abbott <labbott@kernel.org>
> Cc: Brian Starkey <Brian.Starkey@arm.com>
> Cc: Hridya Valsaraju <hridya@google.com>
> Cc: Suren Baghdasaryan <surenb@google.com>
> Cc: Sandeep Patil <sspatil@google.com>
> Cc: Ørjan Eide <orjan.eide@arm.com>
> Cc: Robin Murphy <robin.murphy@arm.com>
> Cc: Ezequiel Garcia <ezequiel@collabora.com>
> Cc: Simon Ser <contact@emersion.fr>
> Cc: James Jones <jajones@nvidia.com>
> Cc: linux-media@vger.kernel.org
> Cc: dri-devel@lists.freedesktop.org
> Signed-off-by: John Stultz <john.stultz@linaro.org>
> ---
>  drivers/dma-buf/heaps/Kconfig       |  1 +
>  drivers/dma-buf/heaps/system_heap.c | 32 +++++++++++++++++++++++++----
>  2 files changed, 29 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
> index a5eef06c4226..f13cde4321b1 100644
> --- a/drivers/dma-buf/heaps/Kconfig
> +++ b/drivers/dma-buf/heaps/Kconfig
> @@ -1,6 +1,7 @@
>  config DMABUF_HEAPS_SYSTEM
>  	bool "DMA-BUF System Heap"
>  	depends on DMABUF_HEAPS
> +	select PAGE_POOL
>  	help
>  	  Choose this option to enable the system dmabuf heap. The system heap
>  	  is backed by pages from the buddy allocator. If in doubt, say Y.
> diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
> index 882a632e9bb7..9f57b4c8ae69 100644
> --- a/drivers/dma-buf/heaps/system_heap.c
> +++ b/drivers/dma-buf/heaps/system_heap.c
> @@ -20,6 +20,7 @@
>  #include <linux/scatterlist.h>
>  #include <linux/slab.h>
>  #include <linux/vmalloc.h>
> +#include <net/page_pool.h>
> 
>  struct dma_heap *sys_heap;
> 
> @@ -46,6 +47,7 @@ struct dma_heap_attachment {
>  static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP};
>  static const unsigned int orders[] = {8, 4, 0};
>  #define NUM_ORDERS ARRAY_SIZE(orders)
> +struct page_pool *pools[NUM_ORDERS];
> 
>  static struct sg_table *dup_sg_table(struct sg_table *table)
>  {
> @@ -264,13 +266,17 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
>  	struct system_heap_buffer *buffer = dmabuf->priv;
>  	struct sg_table *table;
>  	struct scatterlist *sg;
> -	int i;
> +	int i, j;
> 
>  	table = &buffer->sg_table;
>  	for_each_sg(table->sgl, sg, table->nents, i) {
>  		struct page *page = sg_page(sg);
> 
> -		__free_pages(page, compound_order(page));
> +		for (j = 0; j < NUM_ORDERS; j++) {
> +			if (compound_order(page) == orders[j])
> +				break;
> +		}
> +		page_pool_put_full_page(pools[j], page, false);
>  	}
>  	sg_free_table(table);
>  	kfree(buffer);
> @@ -300,8 +306,7 @@ static struct page *alloc_largest_available(unsigned long size,
>  			continue;
>  		if (max_order < orders[i])
>  			continue;
> -
> -		page = alloc_pages(order_flags[i], orders[i]);
> +		page = page_pool_alloc_pages(pools[i], order_flags[i]);
>  		if (!page)
>  			continue;
>  		return page;
> @@ -406,6 +411,25 @@ static const struct dma_heap_ops system_heap_ops = {
>  static int system_heap_create(void)
>  {
>  	struct dma_heap_export_info exp_info;
> +	int i;
> +
> +	for (i = 0; i < NUM_ORDERS; i++) {
> +		struct page_pool_params pp;
> +
> +		memset(&pp, 0, sizeof(pp));
> +		pp.order = orders[i];
> +		pp.dma_dir = DMA_BIDIRECTIONAL;
> +		pools[i] = page_pool_create(&pp);
> +
> +		if (IS_ERR(pools[i])) {
> +			int j;
> +
> +			pr_err("%s: page pool creation failed!\n", __func__);
> +			for (j = 0; j < i; j++)
> +				page_pool_destroy(pools[j]);
> +			return PTR_ERR(pools[i]);
> +		}
> +	}
> 
>  	exp_info.name = "system";
>  	exp_info.ops = &system_heap_ops;

This is cool, I didn't know about this pooling code under net/core. Nice and compact.
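
For anyone else who hasn't poked at net/core/page_pool.c before, the lifecycle
the patch relies on boils down to roughly the following. This is just a minimal
sketch based on the hunks above, with made-up helper names (pool_init() and
friends) for illustration, not the actual heap code:

	#include <net/page_pool.h>

	/* One pool, created once at init time (the heap keeps one per order). */
	static struct page_pool *pool;

	static int pool_init(unsigned int order)
	{
		struct page_pool_params pp = { 0 };

		pp.order = order;		/* pages of size PAGE_SIZE << order */
		pp.dma_dir = DMA_BIDIRECTIONAL;	/* same setting the patch uses */

		pool = page_pool_create(&pp);
		if (IS_ERR(pool))
			return PTR_ERR(pool);
		return 0;
	}

	static struct page *pool_alloc(gfp_t gfp)
	{
		/* Returns a recycled page if one is cached, else falls back
		 * to the buddy allocator with the given gfp flags. */
		return page_pool_alloc_pages(pool, gfp);
	}

	static void pool_free(struct page *page)
	{
		/* Hand the page back to the pool instead of __free_pages(). */
		page_pool_put_full_page(pool, page, false);
	}

	static void pool_exit(void)
	{
		page_pool_destroy(pool);
	}

The nice part is that the heap only has to swap alloc_pages()/__free_pages()
for the pool calls; the caching policy lives entirely in net/core.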

-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project


Thread overview: 11+ messages
2020-09-26  4:24 [RFC][PATCH 0/6] dma-buf: Performance improvements for system heap John Stultz
2020-09-26  4:24 ` [RFC][PATCH 1/6] dma-buf: system_heap: Rework system heap to use sgtables instead of pagelists John Stultz
2020-09-26  4:24 ` [RFC][PATCH 2/6] dma-buf: heaps: Move heap-helper logic into the cma_heap implementation John Stultz
2020-09-26  4:24 ` [RFC][PATCH 3/6] dma-buf: heaps: Remove heap-helpers code John Stultz
2020-09-26  4:24 ` [RFC][PATCH 4/6] dma-buf: system_heap: Allocate higher order pages if available John Stultz
2020-09-26  4:24 ` [RFC][PATCH 5/6] dma-buf: system_heap: Add pagepool support to system heap John Stultz
2020-09-30  4:46   ` Chris Goldsworthy [this message]
2020-10-01 14:49     ` Chris Goldsworthy
2020-10-01 18:28       ` John Stultz
2020-10-01 22:07     ` John Stultz
2020-09-26  4:24 ` [RFC][PATCH 6/6] dma-buf: heaps: Skip sync if not mapped John Stultz
