From: Hyesoo Yu <hyesoo.yu@samsung.com>
To: sumit.semwal@linaro.org
Cc: minchan@kernel.org, akpm@linux-foundation.org,
	iamjoonsoo.kim@lge.com, joaodias@google.com, linux-mm@kvack.org,
	pullip.cho@samsung.com, surenb@google.com, vbabka@suse.cz,
	afd@ti.com, benjamin.gaignard@linaro.org, lmark@codeaurora.org,
	labbott@redhat.com, Brian.Starkey@arm.com,
	john.stultz@linaro.org, christian.koenig@amd.com,
	linux-media@vger.kernel.org, dri-devel@lists.freedesktop.org,
	linaro-mm-sig@lists.linaro.org, linux-kernel@vger.kernel.org,
	robh+dt@kernel.org, devicetree@vger.kernel.org,
	Hyesoo Yu <hyesoo.yu@samsung.com>
Subject: [PATCH 2/3] dma-buf: heaps: add chunk heap to dmabuf heaps
Date: Tue, 18 Aug 2020 17:04:14 +0900
Message-ID: <20200818080415.7531-3-hyesoo.yu@samsung.com>
In-Reply-To: <20200818080415.7531-1-hyesoo.yu@samsung.com>

This patch adds support for a chunk heap that allows for buffers
that are made up of a list of fixed-size chunks taken from a CMA region.
Chunk sizes are configured when the heaps are created.
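
A chunk heap is instantiated from a device tree node that points at a
reusable (CMA) reserved-memory region. A minimal sketch of such a node,
based on the properties read in chunk_heap_probe() (names and values below
are illustrative; the binding itself is added in the next patch):

	chunk_default_heap: chunk_default_heap {
		compatible = "dma_heap,chunk";
		memory-region = <&chunk_memory>;	/* reusable reserved-memory node */
		alignment = <0x10000>;			/* chunk size, here 64 KiB */
	};

Once registered, the heap appears under /dev/dma_heap/ with the name of the
reserved-memory region, and userspace allocates from it with the usual
DMA_HEAP_IOCTL_ALLOC ioctl.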

Signed-off-by: Hyesoo Yu <hyesoo.yu@samsung.com>
---
 drivers/dma-buf/heaps/Kconfig      |   9 ++
 drivers/dma-buf/heaps/Makefile     |   1 +
 drivers/dma-buf/heaps/chunk_heap.c | 222 +++++++++++++++++++++++++++++++++++++
 3 files changed, 232 insertions(+)
 create mode 100644 drivers/dma-buf/heaps/chunk_heap.c

diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index a5eef06..98552fa 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -12,3 +12,12 @@ config DMABUF_HEAPS_CMA
 	  Choose this option to enable dma-buf CMA heap. This heap is backed
 	  by the Contiguous Memory Allocator (CMA). If your system has these
 	  regions, you should say Y here.
+
+config DMABUF_HEAPS_CHUNK
+	tristate "DMA-BUF CHUNK Heap"
+	depends on DMABUF_HEAPS && DMA_CMA
+	help
+	  Choose this option to enable the dma-buf CHUNK heap. This heap is
+	  backed by the Contiguous Memory Allocator (CMA) and allocates buffers
+	  that are made up of a list of fixed-size chunks taken from CMA. Chunk
+	  sizes are configured when the heaps are created.
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
index 6e54cde..3b2a0986 100644
--- a/drivers/dma-buf/heaps/Makefile
+++ b/drivers/dma-buf/heaps/Makefile
@@ -2,3 +2,4 @@
 obj-y					+= heap-helpers.o
 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM)	+= system_heap.o
 obj-$(CONFIG_DMABUF_HEAPS_CMA)		+= cma_heap.o
+obj-$(CONFIG_DMABUF_HEAPS_CHUNK)	+= chunk_heap.o
diff --git a/drivers/dma-buf/heaps/chunk_heap.c b/drivers/dma-buf/heaps/chunk_heap.c
new file mode 100644
index 0000000..1eefaec
--- /dev/null
+++ b/drivers/dma-buf/heaps/chunk_heap.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMA-BUF chunk heap exporter
+ *
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * Author: <hyesoo.yu@samsung.com> for Samsung Electronics.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/cma.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/dma-contiguous.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/sched/signal.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of.h>
+
+#include "heap-helpers.h"
+
+struct chunk_heap {
+	struct dma_heap *heap;
+	phys_addr_t base;
+	phys_addr_t size;
+	atomic_t cur_pageblock_idx;
+	unsigned int max_num_pageblocks;
+	unsigned int order;
+};
+
+static void chunk_heap_free(struct heap_helper_buffer *buffer)
+{
+	struct chunk_heap *chunk_heap = dma_heap_get_drvdata(buffer->heap);
+	pgoff_t pg;
+
+	for (pg = 0; pg < buffer->pagecount; pg++)
+		__free_pages(buffer->pages[pg], chunk_heap->order);
+	kvfree(buffer->pages);
+	kfree(buffer);
+}
+
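+/*
+ * Return the first PFN of the next pageblock to scan, cycling through the
+ * pageblocks of the reserved region in round-robin order.
+ */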
+static inline unsigned long chunk_get_next_pfn(struct chunk_heap *chunk_heap)
+{
+	unsigned long i = atomic_inc_return(&chunk_heap->cur_pageblock_idx) %
+		chunk_heap->max_num_pageblocks;
+
+	return PHYS_PFN(chunk_heap->base) + i * pageblock_nr_pages;
+}
+
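+/*
+ * Allocate @count chunks of 2^@order pages each from the CMA reserved region,
+ * walking it one pageblock at a time until the request is satisfied.
+ */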
+static int chunk_alloc_pages(struct chunk_heap *chunk_heap, struct page **pages,
+			     unsigned int order, unsigned int count)
+{
+	unsigned long base;
+	unsigned int i = 0, nr_block = 0, nr_elem;
+	int ret;
+
+	while (count) {
+		/*
+		 * If every pageblock in the region has already been scanned,
+		 * give up and fail the allocation.
+		 */
+		if (nr_block++ == chunk_heap->max_num_pageblocks) {
+			ret = -ENOMEM;
+			goto err_bulk;
+		}
+		base = chunk_get_next_pfn(chunk_heap);
+		nr_elem = min_t(unsigned int, count, pageblock_nr_pages >> order);
+		ret = alloc_pages_bulk(base, base + pageblock_nr_pages, MIGRATE_CMA,
+				       GFP_KERNEL, order, nr_elem, pages + i);
+		if (ret < 0)
+			goto err_bulk;
+
+		i += ret;
+		count -= ret;
+	}
+
+	return 0;
+
+err_bulk:
+	while (i-- > 0)
+		__free_pages(pages[i], order);
+
+	return ret;
+}
+
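+/*
+ * dma_heap_ops.allocate callback: round the length up to a whole number of
+ * chunks, back the buffer with chunks allocated from the CMA region and
+ * export it as a dma-buf file descriptor via the heap helpers.
+ */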
+static int chunk_heap_allocate(struct dma_heap *heap, unsigned long len,
+			     unsigned long fd_flags, unsigned long heap_flags)
+{
+	struct chunk_heap *chunk_heap = dma_heap_get_drvdata(heap);
+	struct heap_helper_buffer *helper_buffer;
+	struct dma_buf *dmabuf;
+	unsigned int count = DIV_ROUND_UP(len, PAGE_SIZE << chunk_heap->order);
+	int ret = -ENOMEM;
+
+	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
+	if (!helper_buffer)
+		return ret;
+
+	init_heap_helper_buffer(helper_buffer, chunk_heap_free);
+
+	helper_buffer->heap = heap;
+	helper_buffer->size = ALIGN(len, PAGE_SIZE << chunk_heap->order);
+	helper_buffer->pagecount = count;
+	helper_buffer->pages = kvmalloc_array(helper_buffer->pagecount,
+					      sizeof(*helper_buffer->pages), GFP_KERNEL);
+	if (!helper_buffer->pages)
+		goto err0;
+
+	ret = chunk_alloc_pages(chunk_heap, helper_buffer->pages,
+				chunk_heap->order, helper_buffer->pagecount);
+	if (ret < 0)
+		goto err1;
+
+	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+	if (IS_ERR(dmabuf)) {
+		ret = PTR_ERR(dmabuf);
+		goto err2;
+	}
+
+	helper_buffer->dmabuf = dmabuf;
+
+	ret = dma_buf_fd(dmabuf, fd_flags);
+	if (ret < 0) {
+		dma_buf_put(dmabuf);
+		return ret;
+	}
+
+	return ret;
+
+err2:
+	while (count-- > 0)
+		__free_pages(helper_buffer->pages[count], chunk_heap->order);
+err1:
+	kvfree(helper_buffer->pages);
+err0:
+	kfree(helper_buffer);
+
+	return ret;
+}
+
+static void rmem_remove_callback(void *p)
+{
+	of_reserved_mem_device_release((struct device *)p);
+}
+
+static const struct dma_heap_ops chunk_heap_ops = {
+	.allocate = chunk_heap_allocate,
+};
+
+static int chunk_heap_probe(struct platform_device *pdev)
+{
+	struct chunk_heap *chunk_heap;
+	struct reserved_mem *rmem;
+	struct device_node *rmem_np;
+	struct dma_heap_export_info exp_info;
+	unsigned int alignment = PAGE_SIZE;
+	int ret;
+
+	ret = of_reserved_mem_device_init(&pdev->dev);
+	if (ret || !pdev->dev.cma_area) {
+		dev_err(&pdev->dev, "The CMA reserved area is not assigned (ret %d)\n", ret);
+		return -EINVAL;
+	}
+
+	ret = devm_add_action(&pdev->dev, rmem_remove_callback, &pdev->dev);
+	if (ret) {
+		of_reserved_mem_device_release(&pdev->dev);
+		return ret;
+	}
+
+	rmem_np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+	rmem = of_reserved_mem_lookup(rmem_np);
+	of_node_put(rmem_np);
+	if (!rmem)
+		return -EINVAL;
+
+	chunk_heap = devm_kzalloc(&pdev->dev, sizeof(*chunk_heap), GFP_KERNEL);
+	if (!chunk_heap)
+		return -ENOMEM;
+
+	chunk_heap->base = rmem->base;
+	chunk_heap->size = rmem->size;
+	chunk_heap->max_num_pageblocks = rmem->size >> (pageblock_order + PAGE_SHIFT);
+
+	of_property_read_u32(pdev->dev.of_node, "alignment", &alignment);
+	chunk_heap->order = get_order(alignment);
+
+	exp_info.name = rmem->name;
+	exp_info.ops = &chunk_heap_ops;
+	exp_info.priv = chunk_heap;
+
+	chunk_heap->heap = dma_heap_add(&exp_info);
+	if (IS_ERR(chunk_heap->heap))
+		return PTR_ERR(chunk_heap->heap);
+
+	return 0;
+}
+
+static const struct of_device_id chunk_heap_of_match[] = {
+	{ .compatible = "dma_heap,chunk", },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(of, chunk_heap_of_match);
+
+static struct platform_driver chunk_heap_driver = {
+	.driver		= {
+		.name	= "chunk_heap",
+		.of_match_table = chunk_heap_of_match,
+	},
+	.probe		= chunk_heap_probe,
+};
+
+static int __init chunk_heap_init(void)
+{
+	return platform_driver_register(&chunk_heap_driver);
+}
+module_init(chunk_heap_init);
+MODULE_DESCRIPTION("DMA-BUF Chunk Heap");
+MODULE_LICENSE("GPL v2");
-- 
2.7.4


Thread overview: 27+ messages

2020-08-18  8:04 ` [PATCH 0/3] Chunk Heap Support on DMA-HEAP Hyesoo Yu
2020-08-18  8:04   ` [PATCH 1/3] dma-buf: add missing EXPORT_SYMBOL_GPL() for dma heaps Hyesoo Yu
2020-08-18  8:04   ` [PATCH 2/3] dma-buf: heaps: add chunk heap to dmabuf heaps Hyesoo Yu [this message]
2020-08-18 10:11     ` David Hildenbrand
2020-08-18 10:17     ` kernel test robot
2020-08-18  8:04   ` [PATCH 3/3] dma-heap: Devicetree binding for chunk heap Hyesoo Yu
2020-08-18 16:48     ` Rob Herring
2020-08-21  8:21       ` Hyesoo Yu
2020-08-18 10:55   ` [PATCH 0/3] Chunk Heap Support on DMA-HEAP Brian Starkey
2020-08-19  3:46     ` Cho KyongHo
2020-08-19 13:22       ` Brian Starkey
2020-08-21  7:38         ` Cho KyongHo
2020-08-18 20:55   ` John Stultz