From: Christoph Hellwig <hch@lst.de>
To: Mauro Carvalho Chehab <mchehab@kernel.org>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
	Joonyoung Shim <jy0922.shim@samsung.com>,
	Seung-Woo Kim <sw0312.kim@samsung.com>,
	Kyungmin Park <kyungmin.park@samsung.com>,
	Ben Skeggs <bskeggs@redhat.com>, Pawel Osciak <pawel@osciak.com>,
	Marek Szyprowski <m.szyprowski@samsung.com>,
	Matt Porter <mporter@kernel.crashing.org>,
	iommu@lists.linux-foundation.org
Cc: Tom Lendacky <thomas.lendacky@amd.com>,
	linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-media@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, linux-ia64@vger.kernel.org,
	linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org,
	linux-samsung-soc@vger.kernel.org, nouveau@lists.freedesktop.org,
	netdev@vger.kernel.org, linux-nvme@lists.infradead.org,
	linux-scsi@vger.kernel.org, linux-mm@kvack.org,
	alsa-devel@alsa-project.org
Subject: [PATCH 26/28] dmapool: add dma_alloc_pages support
Date: Wed, 19 Aug 2020 08:55:53 +0200
Message-ID: <20200819065555.1802761-27-hch@lst.de>
In-Reply-To: <20200819065555.1802761-1-hch@lst.de>

Add a new variant of a dmapool that uses non-coherent memory from
dma_alloc_pages.  Unlike the existing dma_pool_create, this one
initializes a pool allocated by the caller, which avoids a pointless
extra allocation.  At some point it might be worth switching the
coherent allocation over to a similar dma_pool_init_coherent helper as
well, but that is better done as a separate series including a few
conversions.
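
A minimal usage sketch of the new interface (illustration only, not part
of this patch; the driver, names and sizes below are hypothetical).  The
pool is embedded in a driver structure and initialized in place, and
because the memory has dma_alloc_pages() semantics the driver transfers
ownership to and from the device with the usual dma_sync_single_*()
calls:

	/* hypothetical driver, for illustration only */
	struct mydev {
		struct device	*dev;
		struct dma_pool	desc_pool;	/* embedded, no separate allocation */
	};

	static int mydev_init_pool(struct mydev *md)
	{
		/* 64-byte descriptors that must not cross a 4K boundary */
		return dma_pool_init(md->dev, &md->desc_pool, "mydev-desc",
				     64, 64, SZ_4K, DMA_BIDIRECTIONAL);
	}

	static int mydev_do_io(struct mydev *md)
	{
		dma_addr_t dma;
		void *desc;

		desc = dma_pool_alloc(&md->desc_pool, GFP_KERNEL, &dma);
		if (!desc)
			return -ENOMEM;

		/* CPU fills in the descriptor, then the device owns it */
		dma_sync_single_for_device(md->dev, dma, 64, DMA_BIDIRECTIONAL);
		/* ... start the transfer and wait for completion ... */
		dma_sync_single_for_cpu(md->dev, dma, 64, DMA_BIDIRECTIONAL);

		dma_pool_free(&md->desc_pool, desc, dma);
		return 0;
	}

	static void mydev_exit_pool(struct mydev *md)
	{
		dma_pool_exit(&md->desc_pool);
	}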

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/dmapool.h |  23 ++++-
 mm/dmapool.c            | 211 +++++++++++++++++++++++++---------------
 2 files changed, 154 insertions(+), 80 deletions(-)

diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index f632ecfb423840..1387525c4e52e8 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -11,6 +11,10 @@
 #ifndef LINUX_DMAPOOL_H
 #define	LINUX_DMAPOOL_H
 
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
 #include <linux/scatterlist.h>
 #include <asm/io.h>
 
@@ -18,11 +22,28 @@ struct device;
 
 #ifdef CONFIG_HAS_DMA
 
+struct dma_pool {		/* the pool */
+	struct list_head page_list;
+	spinlock_t lock;
+	size_t size;
+	struct device *dev;
+	size_t allocation;
+	size_t boundary;
+	bool is_coherent;
+	enum dma_data_direction dir;
+	char name[32];
+	struct list_head pools;
+};
+
 struct dma_pool *dma_pool_create(const char *name, struct device *dev, 
 			size_t size, size_t align, size_t allocation);
-
 void dma_pool_destroy(struct dma_pool *pool);
 
+int dma_pool_init(struct device *dev, struct dma_pool *pool, const char *name,
+		size_t size, size_t align, size_t boundary,
+		enum dma_data_direction dir);
+void dma_pool_exit(struct dma_pool *pool);
+
 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 		     dma_addr_t *handle);
 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
diff --git a/mm/dmapool.c b/mm/dmapool.c
index f9fb9bbd733e0f..c60a48b22c8d6a 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -6,10 +6,10 @@
  * Copyright 2007 Intel Corporation
  *   Author: Matthew Wilcox <willy@linux.intel.com>
  *
- * This allocator returns small blocks of a given size which are DMA-able by
- * the given device.  It uses the dma_alloc_coherent page allocator to get
- * new pages, then splits them up into blocks of the required size.
- * Many older drivers still have their own code to do this.
+ * This allocator returns small blocks of a given size which are DMA-able by the
+ * given device.  It either uses the dma_alloc_coherent or the dma_alloc_pages
+ * allocator to get new pages, then splits them up into blocks of the required
+ * size.
  *
  * The current design of this allocator is fairly simple.  The pool is
  * represented by the 'struct dma_pool' which keeps a doubly-linked list of
@@ -39,17 +39,6 @@
 #define DMAPOOL_DEBUG 1
 #endif
 
-struct dma_pool {		/* the pool */
-	struct list_head page_list;
-	spinlock_t lock;
-	size_t size;
-	struct device *dev;
-	size_t allocation;
-	size_t boundary;
-	char name[32];
-	struct list_head pools;
-};
-
 struct dma_page {		/* cacheable header for 'allocation' bytes */
 	struct list_head page_list;
 	void *vaddr;
@@ -104,74 +93,40 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
 
 static DEVICE_ATTR(pools, 0444, show_pools, NULL);
 
-/**
- * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
- * @name: name of pool, for diagnostics
- * @dev: device that will be doing the DMA
- * @size: size of the blocks in this pool.
- * @align: alignment requirement for blocks; must be a power of two
- * @boundary: returned blocks won't cross this power of two boundary
- * Context: not in_interrupt()
- *
- * Given one of these pools, dma_pool_alloc()
- * may be used to allocate memory.  Such memory will all have "consistent"
- * DMA mappings, accessible by the device and its driver without using
- * cache flushing primitives.  The actual size of blocks allocated may be
- * larger than requested because of alignment.
- *
- * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
- * cross that size boundary.  This is useful for devices which have
- * addressing restrictions on individual DMA transfers, such as not crossing
- * boundaries of 4KBytes.
- *
- * Return: a dma allocation pool with the requested characteristics, or
- * %NULL if one can't be created.
- */
-struct dma_pool *dma_pool_create(const char *name, struct device *dev,
-				 size_t size, size_t align, size_t boundary)
+static int __dma_pool_init(struct device *dev, struct dma_pool *pool,
+		const char *name, size_t size, size_t align, size_t boundary)
 {
-	struct dma_pool *retval;
 	size_t allocation;
 	bool empty = false;
 
 	if (align == 0)
 		align = 1;
-	else if (align & (align - 1))
-		return NULL;
+	if (align & (align - 1))
+		return -EINVAL;
 
 	if (size == 0)
-		return NULL;
-	else if (size < 4)
-		size = 4;
-
-	size = ALIGN(size, align);
+		return -EINVAL;
+	size = ALIGN(max_t(size_t, size, 4), align);
 	allocation = max_t(size_t, size, PAGE_SIZE);
 
 	if (!boundary)
 		boundary = allocation;
-	else if ((boundary < size) || (boundary & (boundary - 1)))
-		return NULL;
-
-	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
-	if (!retval)
-		return retval;
-
-	strlcpy(retval->name, name, sizeof(retval->name));
-
-	retval->dev = dev;
-
-	INIT_LIST_HEAD(&retval->page_list);
-	spin_lock_init(&retval->lock);
-	retval->size = size;
-	retval->boundary = boundary;
-	retval->allocation = allocation;
-
-	INIT_LIST_HEAD(&retval->pools);
+	if (boundary < size || (boundary & (boundary - 1)))
+		return -EINVAL;
+
+	strlcpy(pool->name, name, sizeof(pool->name));
+	pool->dev = dev;
+	INIT_LIST_HEAD(&pool->page_list);
+	spin_lock_init(&pool->lock);
+	pool->size = size;
+	pool->boundary = boundary;
+	pool->allocation = allocation;
+	INIT_LIST_HEAD(&pool->pools);
 
 	/*
 	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
 	 * pools_reg_lock ensures that there is not a race between
-	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
+	 * __dma_pool_init() and dma_pool_exit() or within dma_pool_create()
 	 * when the first invocation of dma_pool_create() failed on
 	 * device_create_file() and the second assumes that it has been done (I
 	 * know it is a short window).
@@ -180,7 +135,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 	mutex_lock(&pools_lock);
 	if (list_empty(&dev->dma_pools))
 		empty = true;
-	list_add(&retval->pools, &dev->dma_pools);
+	list_add(&pool->pools, &dev->dma_pools);
 	mutex_unlock(&pools_lock);
 	if (empty) {
 		int err;
@@ -188,18 +143,94 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 		err = device_create_file(dev, &dev_attr_pools);
 		if (err) {
 			mutex_lock(&pools_lock);
-			list_del(&retval->pools);
+			list_del(&pool->pools);
 			mutex_unlock(&pools_lock);
 			mutex_unlock(&pools_reg_lock);
-			kfree(retval);
-			return NULL;
+			return err;
 		}
 	}
 	mutex_unlock(&pools_reg_lock);
-	return retval;
+	return 0;
+}
+
+/**
+ * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
+ * @name: name of pool, for diagnostics
+ * @dev: device that will be doing the DMA
+ * @size: size of the blocks in this pool.
+ * @align: alignment requirement for blocks; must be a power of two
+ * @boundary: returned blocks won't cross this power of two boundary
+ * Context: not in_interrupt()
+ *
+ * Given one of these pools, dma_pool_alloc()
+ * may be used to allocate memory.  Such memory will all have "consistent"
+ * DMA mappings, accessible by the device and its driver without using
+ * cache flushing primitives.  The actual size of blocks allocated may be
+ * larger than requested because of alignment.
+ *
+ * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
+ * cross that size boundary.  This is useful for devices which have
+ * addressing restrictions on individual DMA transfers, such as not crossing
+ * boundaries of 4KBytes.
+ * Return: a dma allocation pool with the requested characteristics, or
+ * %NULL if one can't be created.
+ */
+struct dma_pool *dma_pool_create(const char *name, struct device *dev,
+				 size_t size, size_t align, size_t boundary)
+{
+	struct dma_pool *pool;
+
+	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, dev_to_node(dev));
+	if (!pool)
+		return NULL;
+	if (__dma_pool_init(dev, pool, name, size, align, boundary))
+		goto out_free_pool;
+	pool->is_coherent = true;
+	return pool;
+out_free_pool:
+	kfree(pool);
+	return NULL;
 }
 EXPORT_SYMBOL(dma_pool_create);
 
+/**
+ * dma_pool_init - initialize a pool of DMA addressable memory
+ * @dev:	device that will be doing the DMA
+ * @pool:	pool to initialize
+ * @name:	name of pool, for diagnostics
+ * @size:	size of the blocks in this pool.
+ * @align:	alignment requirement for blocks; must be a power of two
+ * @boundary:	returned blocks won't cross this power of two boundary
+ * @dir:	DMA direction the allocations are going to be used for
+ *
+ * Context:	not in_interrupt()
+ *
+ * Given one of these pools, dma_pool_alloc() may be used to allocate memory.
+ * Such memory will have the same semantics as memory returned from
+ * dma_alloc_pages(), that is, ownership needs to be transferred to and from the
+ * device.  The actual size of blocks allocated may be larger than requested
+ * because of alignment.
+ *
+ * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
+ * cross that size boundary.  This is useful for devices which have
+ * addressing restrictions on individual DMA transfers, such as not crossing
+ * boundaries of 4KBytes.
+ */
+int dma_pool_init(struct device *dev, struct dma_pool *pool, const char *name,
+		size_t size, size_t align, size_t boundary,
+		enum dma_data_direction dir)
+{
+	int ret;
+
+	ret = __dma_pool_init(dev, pool, name, size, align, boundary);
+	if (ret)
+		return ret;
+	pool->is_coherent = false;
+	pool->dir = dir;
+	return 0;
+}
+EXPORT_SYMBOL(dma_pool_init);
+
 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
 {
 	unsigned int offset = 0;
@@ -223,8 +254,12 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 	page = kmalloc(sizeof(*page), mem_flags);
 	if (!page)
 		return NULL;
-	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
-					 &page->dma, mem_flags);
+	if (pool->is_coherent)
+		page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
+						 &page->dma, mem_flags);
+	else
+		page->vaddr = dma_alloc_pages(pool->dev, pool->allocation,
+					      &page->dma, pool->dir, mem_flags);
 	if (page->vaddr) {
 #ifdef	DMAPOOL_DEBUG
 		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
@@ -251,20 +286,25 @@ static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
 #ifdef	DMAPOOL_DEBUG
 	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
-	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
+	if (pool->is_coherent)
+		dma_free_coherent(pool->dev, pool->allocation, page->vaddr,
+				  dma);
+	else
+		dma_free_pages(pool->dev, pool->allocation, page->vaddr, dma,
+			       pool->dir);
 	list_del(&page->page_list);
 	kfree(page);
 }
 
 /**
- * dma_pool_destroy - destroys a pool of dma memory blocks.
+ * dma_pool_exit - destroys a pool of dma memory blocks.
  * @pool: dma pool that will be destroyed
  * Context: !in_interrupt()
  *
- * Caller guarantees that no more memory from the pool is in use,
- * and that nothing will try to use the pool after this call.
+ * Caller guarantees that no more memory from the pool is in use, and that
+ * nothing will try to use the pool after this call.
  */
-void dma_pool_destroy(struct dma_pool *pool)
+void dma_pool_exit(struct dma_pool *pool)
 {
 	bool empty = false;
 
@@ -299,7 +339,20 @@ void dma_pool_destroy(struct dma_pool *pool)
 		} else
 			pool_free_page(pool, page);
 	}
+}
+EXPORT_SYMBOL(dma_pool_exit);
 
+/**
+ * dma_pool_destroy - destroys a pool of dma memory blocks.
+ * @pool: dma pool that will be destroyed
+ * Context: !in_interrupt()
+ *
+ * Caller guarantees that no more memory from the pool is in use,
+ * and that nothing will try to use the pool after this call.
+ */
+void dma_pool_destroy(struct dma_pool *pool)
+{
+	dma_pool_exit(pool);
 	kfree(pool);
 }
 EXPORT_SYMBOL(dma_pool_destroy);
-- 
2.28.0
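
For contrast with the dma_pool_init() kernel-doc above: the coherent
dma_pool_create() path keeps its pointer-returning convention and its
blocks need no ownership transfers.  Again only a sketch with
hypothetical names:

	static int mydev_coherent_setup(struct device *dev)
	{
		struct dma_pool *pool;

		/* coherent blocks, same 4K boundary restriction as above */
		pool = dma_pool_create("mydev-coherent", dev, 64, 64, SZ_4K);
		if (!pool)
			return -ENOMEM;	/* returns NULL, not an errno, on failure */

		/* blocks are coherent: no dma_sync_single_*() calls needed */

		dma_pool_destroy(pool);
		return 0;
	}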


WARNING: multiple messages have this Message-ID (diff)
From: Christoph Hellwig <hch@lst.de>
To: Mauro Carvalho Chehab <mchehab@kernel.org>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
	Joonyoung Shim <jy0922.shim@samsung.com>,
	Seung-Woo Kim <sw0312.kim@samsung.com>,
	Kyungmin Park <kyungmin.park@samsung.com>,
	Ben Skeggs <bskeggs@redhat.com>, Pawel Osciak <pawel@osciak.com>,
	Marek Szyprowski <m.szyprowski@samsung.com>,
	Matt Porter <mporter@kernel.crashing.org>,
	iommu@lists.linux-foundation.org
Cc: Tom Lendacky <thomas.lendacky@amd.com>,
	alsa-devel@alsa-project.org, linux-samsung-soc@vger.kernel.org,
	linux-ia64@vger.kernel.org, linux-scsi@vger.kernel.org,
	linux-parisc@vger.kernel.org, linux-doc@vger.kernel.org,
	nouveau@lists.freedesktop.org, linux-kernel@vger.kernel.org,
	linux-nvme@lists.infradead.org, linux-mips@vger.kernel.org,
	linux-mm@kvack.org, netdev@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	linux-media@vger.kernel.org
Subject: [PATCH 26/28] dmapool: add dma_alloc_pages support
Date: Wed, 19 Aug 2020 08:55:53 +0200	[thread overview]
Message-ID: <20200819065555.1802761-27-hch@lst.de> (raw)
In-Reply-To: <20200819065555.1802761-1-hch@lst.de>

Add an new variant of a dmapool that uses non-coherent memory from
dma_alloc_pages.  Unlike the existing mempool_create this one
initialized a pool allocated by the caller to avoid a pointless extra
allocation.  At some point it might be worth to also switch the coherent
allocation over to a similar dma_pool_init_coherent helper, but that is
better done as a separate series including a few conversions.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/dmapool.h |  23 ++++-
 mm/dmapool.c            | 211 +++++++++++++++++++++++++---------------
 2 files changed, 154 insertions(+), 80 deletions(-)

diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index f632ecfb423840..1387525c4e52e8 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -11,6 +11,10 @@
 #ifndef LINUX_DMAPOOL_H
 #define	LINUX_DMAPOOL_H
 
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
 #include <linux/scatterlist.h>
 #include <asm/io.h>
 
@@ -18,11 +22,28 @@ struct device;
 
 #ifdef CONFIG_HAS_DMA
 
+struct dma_pool {		/* the pool */
+	struct list_head page_list;
+	spinlock_t lock;
+	size_t size;
+	struct device *dev;
+	size_t allocation;
+	size_t boundary;
+	bool is_coherent;
+	enum dma_data_direction dir;
+	char name[32];
+	struct list_head pools;
+};
+
 struct dma_pool *dma_pool_create(const char *name, struct device *dev, 
 			size_t size, size_t align, size_t allocation);
-
 void dma_pool_destroy(struct dma_pool *pool);
 
+int dma_pool_init(struct device *dev, struct dma_pool *pool, const char *name,
+		size_t size, size_t align, size_t boundary,
+		enum dma_data_direction dir);
+void dma_pool_exit(struct dma_pool *pool);
+
 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 		     dma_addr_t *handle);
 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
diff --git a/mm/dmapool.c b/mm/dmapool.c
index f9fb9bbd733e0f..c60a48b22c8d6a 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -6,10 +6,10 @@
  * Copyright 2007 Intel Corporation
  *   Author: Matthew Wilcox <willy@linux.intel.com>
  *
- * This allocator returns small blocks of a given size which are DMA-able by
- * the given device.  It uses the dma_alloc_coherent page allocator to get
- * new pages, then splits them up into blocks of the required size.
- * Many older drivers still have their own code to do this.
+ * This allocator returns small blocks of a given size which are DMA-able by the
+ * given device.  It either uses the dma_alloc_coherent or the dma_alloc_pages
+ * allocator to get new pages, then splits them up into blocks of the required
+ * size.
  *
  * The current design of this allocator is fairly simple.  The pool is
  * represented by the 'struct dma_pool' which keeps a doubly-linked list of
@@ -39,17 +39,6 @@
 #define DMAPOOL_DEBUG 1
 #endif
 
-struct dma_pool {		/* the pool */
-	struct list_head page_list;
-	spinlock_t lock;
-	size_t size;
-	struct device *dev;
-	size_t allocation;
-	size_t boundary;
-	char name[32];
-	struct list_head pools;
-};
-
 struct dma_page {		/* cacheable header for 'allocation' bytes */
 	struct list_head page_list;
 	void *vaddr;
@@ -104,74 +93,40 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
 
 static DEVICE_ATTR(pools, 0444, show_pools, NULL);
 
-/**
- * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
- * @name: name of pool, for diagnostics
- * @dev: device that will be doing the DMA
- * @size: size of the blocks in this pool.
- * @align: alignment requirement for blocks; must be a power of two
- * @boundary: returned blocks won't cross this power of two boundary
- * Context: not in_interrupt()
- *
- * Given one of these pools, dma_pool_alloc()
- * may be used to allocate memory.  Such memory will all have "consistent"
- * DMA mappings, accessible by the device and its driver without using
- * cache flushing primitives.  The actual size of blocks allocated may be
- * larger than requested because of alignment.
- *
- * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
- * cross that size boundary.  This is useful for devices which have
- * addressing restrictions on individual DMA transfers, such as not crossing
- * boundaries of 4KBytes.
- *
- * Return: a dma allocation pool with the requested characteristics, or
- * %NULL if one can't be created.
- */
-struct dma_pool *dma_pool_create(const char *name, struct device *dev,
-				 size_t size, size_t align, size_t boundary)
+static int __dma_pool_init(struct device *dev, struct dma_pool *pool,
+		const char *name, size_t size, size_t align, size_t boundary)
 {
-	struct dma_pool *retval;
 	size_t allocation;
 	bool empty = false;
 
 	if (align == 0)
 		align = 1;
-	else if (align & (align - 1))
-		return NULL;
+	if (align & (align - 1))
+		return -EINVAL;
 
 	if (size == 0)
-		return NULL;
-	else if (size < 4)
-		size = 4;
-
-	size = ALIGN(size, align);
+		return -EINVAL;
+	size = ALIGN(min_t(size_t, size, 4), align);
 	allocation = max_t(size_t, size, PAGE_SIZE);
 
 	if (!boundary)
 		boundary = allocation;
-	else if ((boundary < size) || (boundary & (boundary - 1)))
-		return NULL;
-
-	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
-	if (!retval)
-		return retval;
-
-	strlcpy(retval->name, name, sizeof(retval->name));
-
-	retval->dev = dev;
-
-	INIT_LIST_HEAD(&retval->page_list);
-	spin_lock_init(&retval->lock);
-	retval->size = size;
-	retval->boundary = boundary;
-	retval->allocation = allocation;
-
-	INIT_LIST_HEAD(&retval->pools);
+	if (boundary < size || (boundary & (boundary - 1)))
+		return -EINVAL;
+
+	strlcpy(pool->name, name, sizeof(pool->name));
+	pool->dev = dev;
+	INIT_LIST_HEAD(&pool->page_list);
+	spin_lock_init(&pool->lock);
+	pool->size = size;
+	pool->boundary = boundary;
+	pool->allocation = allocation;
+	INIT_LIST_HEAD(&pool->pools);
 
 	/*
 	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
 	 * pools_reg_lock ensures that there is not a race between
-	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
+	 * __dma_pool_init() and dma_pool_exit() or within dma_pool_create()
 	 * when the first invocation of dma_pool_create() failed on
 	 * device_create_file() and the second assumes that it has been done (I
 	 * know it is a short window).
@@ -180,7 +135,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 	mutex_lock(&pools_lock);
 	if (list_empty(&dev->dma_pools))
 		empty = true;
-	list_add(&retval->pools, &dev->dma_pools);
+	list_add(&pool->pools, &dev->dma_pools);
 	mutex_unlock(&pools_lock);
 	if (empty) {
 		int err;
@@ -188,18 +143,94 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 		err = device_create_file(dev, &dev_attr_pools);
 		if (err) {
 			mutex_lock(&pools_lock);
-			list_del(&retval->pools);
+			list_del(&pool->pools);
 			mutex_unlock(&pools_lock);
 			mutex_unlock(&pools_reg_lock);
-			kfree(retval);
-			return NULL;
+			return err;
 		}
 	}
 	mutex_unlock(&pools_reg_lock);
-	return retval;
+	return 0;
+}
+
+/**
+ * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
+ * @name: name of pool, for diagnostics
+ * @dev: device that will be doing the DMA
+ * @size: size of the blocks in this pool.
+ * @align: alignment requirement for blocks; must be a power of two
+ * @boundary: returned blocks won't cross this power of two boundary
+ * Context: not in_interrupt()
+ *
+ * Given one of these pools, dma_pool_alloc()
+ * may be used to allocate memory.  Such memory will all have "consistent"
+ * DMA mappings, accessible by the device and its driver without using
+ * cache flushing primitives.  The actual size of blocks allocated may be
+ * larger than requested because of alignment.
+ *
+ * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
+ * cross that size boundary.  This is useful for devices which have
+ * addressing restrictions on individual DMA transfers, such as not crossing
+ * boundaries of 4KBytes.
+ *  Return: a dma allocation pool with the requested characteristics, or
+ * %NULL if one can't be created.
+ */
+struct dma_pool *dma_pool_create(const char *name, struct device *dev,
+				 size_t size, size_t align, size_t boundary)
+{
+	struct dma_pool *pool;
+
+	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, dev_to_node(dev));
+	if (!pool)
+		return NULL;
+	if (__dma_pool_init(dev, pool, name, size, align, boundary))
+		goto out_free_pool;
+	pool->is_coherent = true;
+	return pool;
+out_free_pool:
+	kfree(pool);
+	return NULL;
 }
 EXPORT_SYMBOL(dma_pool_create);
 
+/**
+ * dma_pool_init - initialize a pool DMA addressable memory
+ * @dev:	device that will be doing the DMA
+ * @pool:	pool to initialize
+ * @name:	name of pool, for diagnostics
+ * @size:	size of the blocks in this pool.
+ * @align:	alignment requirement for blocks; must be a power of two
+ * @boundary:	returned blocks won't cross this power of two boundary
+ * @dir:	DMA direction the allocations are going to be used for
+ *
+ * Context:	not in_interrupt()
+ *
+ * Given one of these pools, dma_pool_alloc() may be used to allocate memory.
+ * Such memory will have the same semantics as memory returned from
+ * dma_alloc_pages(), that is ownership needs to be transferred to and from the
+ * device.  The actual size of blocks allocated may be larger than requested
+ * because of alignment.
+ *
+ * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
+ * cross that size boundary.  This is useful for devices which have
+ * addressing restrictions on individual DMA transfers, such as not crossing
+ * boundaries of 4KBytes.
+ */
+int dma_pool_init(struct device *dev, struct dma_pool *pool, const char *name,
+		size_t size, size_t align, size_t boundary,
+		enum dma_data_direction dir)
+{
+	int ret;
+
+	ret = __dma_pool_init(dev, pool, name, size, align, boundary);
+	if (ret)
+		return ret;
+	pool->is_coherent = false;
+	pool->dir = dir;
+	return 0;
+}
+EXPORT_SYMBOL(dma_pool_init);
+
 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
 {
 	unsigned int offset = 0;
@@ -223,8 +254,12 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 	page = kmalloc(sizeof(*page), mem_flags);
 	if (!page)
 		return NULL;
-	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
-					 &page->dma, mem_flags);
+	if (pool->is_coherent)
+		page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
+						 &page->dma, mem_flags);
+	else
+		page->vaddr = dma_alloc_pages(pool->dev, pool->allocation,
+					      &page->dma, pool->dir, mem_flags);
 	if (page->vaddr) {
 #ifdef	DMAPOOL_DEBUG
 		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
@@ -251,20 +286,25 @@ static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
 #ifdef	DMAPOOL_DEBUG
 	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
-	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
+	if (pool->is_coherent)
+		dma_free_coherent(pool->dev, pool->allocation, page->vaddr,
+				  dma);
+	else
+		dma_free_pages(pool->dev, pool->allocation, page->vaddr, dma,
+			       pool->dir);
 	list_del(&page->page_list);
 	kfree(page);
 }
 
 /**
- * dma_pool_destroy - destroys a pool of dma memory blocks.
+ * dma_pool_exit - destroys a pool of dma memory blocks.
  * @pool: dma pool that will be destroyed
  * Context: !in_interrupt()
  *
- * Caller guarantees that no more memory from the pool is in use,
- * and that nothing will try to use the pool after this call.
+ * Caller guarantees that no more memory from the pool is in use, and that
+ * nothing will try to use the pool after this call.
  */
-void dma_pool_destroy(struct dma_pool *pool)
+void dma_pool_exit(struct dma_pool *pool)
 {
 	bool empty = false;
 
@@ -299,7 +339,20 @@ void dma_pool_destroy(struct dma_pool *pool)
 		} else
 			pool_free_page(pool, page);
 	}
+}
+EXPORT_SYMBOL(dma_pool_exit);
 
+/**
+ * dma_pool_destroy - destroys a pool of dma memory blocks.
+ * @pool: dma pool that will be destroyed
+ * Context: !in_interrupt()
+ *
+ * Caller guarantees that no more memory from the pool is in use,
+ * and that nothing will try to use the pool after this call.
+ */
+void dma_pool_destroy(struct dma_pool *pool)
+{
+	dma_pool_exit(pool);
 	kfree(pool);
 }
 EXPORT_SYMBOL(dma_pool_destroy);
-- 
2.28.0


_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

WARNING: multiple messages have this Message-ID (diff)
From: Christoph Hellwig <hch@lst.de>
To: Mauro Carvalho Chehab <mchehab@kernel.org>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
	Joonyoung Shim <jy0922.shim@samsung.com>,
	Seung-Woo Kim <sw0312.kim@samsung.com>,
	Kyungmin Park <kyungmin.park@samsung.com>,
	Ben Skeggs <bskeggs@redhat.com>, Pawel Osciak <pawel@osciak.com>,
	Marek Szyprowski <m.szyprowski@samsung.com>,
	Matt Porter <mporter@kernel.crashing.org>,
	iommu@lists.linux-foundation.org
Cc: Tom Lendacky <thomas.lendacky@amd.com>,
	alsa-devel@alsa-project.org, linux-samsung-soc@vger.kernel.org,
	linux-ia64@vger.kernel.org, linux-scsi@vger.kernel.org,
	linux-parisc@vger.kernel.org, linux-doc@vger.kernel.org,
	nouveau@lists.freedesktop.org, linux-kernel@vger.kernel.org,
	linux-nvme@lists.infradead.org, linux-mips@vger.kernel.org,
	linux-mm@kvack.org, netdev@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	linux-media@vger.kernel.org
Subject: [PATCH 26/28] dmapool: add dma_alloc_pages support
Date: Wed, 19 Aug 2020 08:55:53 +0200	[thread overview]
Message-ID: <20200819065555.1802761-27-hch@lst.de> (raw)
In-Reply-To: <20200819065555.1802761-1-hch@lst.de>

Add an new variant of a dmapool that uses non-coherent memory from
dma_alloc_pages.  Unlike the existing mempool_create this one
initialized a pool allocated by the caller to avoid a pointless extra
allocation.  At some point it might be worth to also switch the coherent
allocation over to a similar dma_pool_init_coherent helper, but that is
better done as a separate series including a few conversions.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/dmapool.h |  23 ++++-
 mm/dmapool.c            | 211 +++++++++++++++++++++++++---------------
 2 files changed, 154 insertions(+), 80 deletions(-)

diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index f632ecfb423840..1387525c4e52e8 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -11,6 +11,10 @@
 #ifndef LINUX_DMAPOOL_H
 #define	LINUX_DMAPOOL_H
 
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
 #include <linux/scatterlist.h>
 #include <asm/io.h>
 
@@ -18,11 +22,28 @@ struct device;
 
 #ifdef CONFIG_HAS_DMA
 
+struct dma_pool {		/* the pool */
+	struct list_head page_list;
+	spinlock_t lock;
+	size_t size;
+	struct device *dev;
+	size_t allocation;
+	size_t boundary;
+	bool is_coherent;
+	enum dma_data_direction dir;
+	char name[32];
+	struct list_head pools;
+};
+
 struct dma_pool *dma_pool_create(const char *name, struct device *dev, 
 			size_t size, size_t align, size_t allocation);
-
 void dma_pool_destroy(struct dma_pool *pool);
 
+int dma_pool_init(struct device *dev, struct dma_pool *pool, const char *name,
+		size_t size, size_t align, size_t boundary,
+		enum dma_data_direction dir);
+void dma_pool_exit(struct dma_pool *pool);
+
 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 		     dma_addr_t *handle);
 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
diff --git a/mm/dmapool.c b/mm/dmapool.c
index f9fb9bbd733e0f..c60a48b22c8d6a 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -6,10 +6,10 @@
  * Copyright 2007 Intel Corporation
  *   Author: Matthew Wilcox <willy@linux.intel.com>
  *
- * This allocator returns small blocks of a given size which are DMA-able by
- * the given device.  It uses the dma_alloc_coherent page allocator to get
- * new pages, then splits them up into blocks of the required size.
- * Many older drivers still have their own code to do this.
+ * This allocator returns small blocks of a given size which are DMA-able by the
+ * given device.  It either uses the dma_alloc_coherent or the dma_alloc_pages
+ * allocator to get new pages, then splits them up into blocks of the required
+ * size.
  *
  * The current design of this allocator is fairly simple.  The pool is
  * represented by the 'struct dma_pool' which keeps a doubly-linked list of
@@ -39,17 +39,6 @@
 #define DMAPOOL_DEBUG 1
 #endif
 
-struct dma_pool {		/* the pool */
-	struct list_head page_list;
-	spinlock_t lock;
-	size_t size;
-	struct device *dev;
-	size_t allocation;
-	size_t boundary;
-	char name[32];
-	struct list_head pools;
-};
-
 struct dma_page {		/* cacheable header for 'allocation' bytes */
 	struct list_head page_list;
 	void *vaddr;
@@ -104,74 +93,40 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
 
 static DEVICE_ATTR(pools, 0444, show_pools, NULL);
 
-/**
- * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
- * @name: name of pool, for diagnostics
- * @dev: device that will be doing the DMA
- * @size: size of the blocks in this pool.
- * @align: alignment requirement for blocks; must be a power of two
- * @boundary: returned blocks won't cross this power of two boundary
- * Context: not in_interrupt()
- *
- * Given one of these pools, dma_pool_alloc()
- * may be used to allocate memory.  Such memory will all have "consistent"
- * DMA mappings, accessible by the device and its driver without using
- * cache flushing primitives.  The actual size of blocks allocated may be
- * larger than requested because of alignment.
- *
- * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
- * cross that size boundary.  This is useful for devices which have
- * addressing restrictions on individual DMA transfers, such as not crossing
- * boundaries of 4KBytes.
- *
- * Return: a dma allocation pool with the requested characteristics, or
- * %NULL if one can't be created.
- */
-struct dma_pool *dma_pool_create(const char *name, struct device *dev,
-				 size_t size, size_t align, size_t boundary)
+static int __dma_pool_init(struct device *dev, struct dma_pool *pool,
+		const char *name, size_t size, size_t align, size_t boundary)
 {
-	struct dma_pool *retval;
 	size_t allocation;
 	bool empty = false;
 
 	if (align == 0)
 		align = 1;
-	else if (align & (align - 1))
-		return NULL;
+	if (align & (align - 1))
+		return -EINVAL;
 
 	if (size == 0)
-		return NULL;
-	else if (size < 4)
-		size = 4;
-
-	size = ALIGN(size, align);
+		return -EINVAL;
+	size = ALIGN(min_t(size_t, size, 4), align);
 	allocation = max_t(size_t, size, PAGE_SIZE);
 
 	if (!boundary)
 		boundary = allocation;
-	else if ((boundary < size) || (boundary & (boundary - 1)))
-		return NULL;
-
-	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
-	if (!retval)
-		return retval;
-
-	strlcpy(retval->name, name, sizeof(retval->name));
-
-	retval->dev = dev;
-
-	INIT_LIST_HEAD(&retval->page_list);
-	spin_lock_init(&retval->lock);
-	retval->size = size;
-	retval->boundary = boundary;
-	retval->allocation = allocation;
-
-	INIT_LIST_HEAD(&retval->pools);
+	if (boundary < size || (boundary & (boundary - 1)))
+		return -EINVAL;
+
+	strlcpy(pool->name, name, sizeof(pool->name));
+	pool->dev = dev;
+	INIT_LIST_HEAD(&pool->page_list);
+	spin_lock_init(&pool->lock);
+	pool->size = size;
+	pool->boundary = boundary;
+	pool->allocation = allocation;
+	INIT_LIST_HEAD(&pool->pools);
 
 	/*
 	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
 	 * pools_reg_lock ensures that there is not a race between
-	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
+	 * __dma_pool_init() and dma_pool_exit() or within dma_pool_create()
 	 * when the first invocation of dma_pool_create() failed on
 	 * device_create_file() and the second assumes that it has been done (I
 	 * know it is a short window).
@@ -180,7 +135,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 	mutex_lock(&pools_lock);
 	if (list_empty(&dev->dma_pools))
 		empty = true;
-	list_add(&retval->pools, &dev->dma_pools);
+	list_add(&pool->pools, &dev->dma_pools);
 	mutex_unlock(&pools_lock);
 	if (empty) {
 		int err;
@@ -188,18 +143,94 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 		err = device_create_file(dev, &dev_attr_pools);
 		if (err) {
 			mutex_lock(&pools_lock);
-			list_del(&retval->pools);
+			list_del(&pool->pools);
 			mutex_unlock(&pools_lock);
 			mutex_unlock(&pools_reg_lock);
-			kfree(retval);
-			return NULL;
+			return err;
 		}
 	}
 	mutex_unlock(&pools_reg_lock);
-	return retval;
+	return 0;
+}
+
+/**
+ * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
+ * @name: name of pool, for diagnostics
+ * @dev: device that will be doing the DMA
+ * @size: size of the blocks in this pool.
+ * @align: alignment requirement for blocks; must be a power of two
+ * @boundary: returned blocks won't cross this power of two boundary
+ * Context: not in_interrupt()
+ *
+ * Given one of these pools, dma_pool_alloc()
+ * may be used to allocate memory.  Such memory will all have "consistent"
+ * DMA mappings, accessible by the device and its driver without using
+ * cache flushing primitives.  The actual size of blocks allocated may be
+ * larger than requested because of alignment.
+ *
+ * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
+ * cross that size boundary.  This is useful for devices which have
+ * addressing restrictions on individual DMA transfers, such as not crossing
+ * boundaries of 4KBytes.
+ *  Return: a dma allocation pool with the requested characteristics, or
+ * %NULL if one can't be created.
+ */
+struct dma_pool *dma_pool_create(const char *name, struct device *dev,
+				 size_t size, size_t align, size_t boundary)
+{
+	struct dma_pool *pool;
+
+	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, dev_to_node(dev));
+	if (!pool)
+		return NULL;
+	if (__dma_pool_init(dev, pool, name, size, align, boundary))
+		goto out_free_pool;
+	pool->is_coherent = true;
+	return pool;
+out_free_pool:
+	kfree(pool);
+	return NULL;
 }
 EXPORT_SYMBOL(dma_pool_create);
 
+/**
+ * dma_pool_init - initialize a pool DMA addressable memory
+ * @dev:	device that will be doing the DMA
+ * @pool:	pool to initialize
+ * @name:	name of pool, for diagnostics
+ * @size:	size of the blocks in this pool.
+ * @align:	alignment requirement for blocks; must be a power of two
+ * @boundary:	returned blocks won't cross this power of two boundary
+ * @dir:	DMA direction the allocations are going to be used for
+ *
+ * Context:	not in_interrupt()
+ *
+ * Given one of these pools, dma_pool_alloc() may be used to allocate memory.
+ * Such memory will have the same semantics as memory returned from
+ * dma_alloc_pages(), that is ownership needs to be transferred to and from the
+ * device.  The actual size of blocks allocated may be larger than requested
+ * because of alignment.
+ *
+ * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
+ * cross that size boundary.  This is useful for devices which have
+ * addressing restrictions on individual DMA transfers, such as not crossing
+ * boundaries of 4KBytes.
+ */
+int dma_pool_init(struct device *dev, struct dma_pool *pool, const char *name,
+		size_t size, size_t align, size_t boundary,
+		enum dma_data_direction dir)
+{
+	int ret;
+
+	ret = __dma_pool_init(dev, pool, name, size, align, boundary);
+	if (ret)
+		return ret;
+	pool->is_coherent = false;
+	pool->dir = dir;
+	return 0;
+}
+EXPORT_SYMBOL(dma_pool_init);
+
 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
 {
 	unsigned int offset = 0;
@@ -223,8 +254,12 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 	page = kmalloc(sizeof(*page), mem_flags);
 	if (!page)
 		return NULL;
-	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
-					 &page->dma, mem_flags);
+	if (pool->is_coherent)
+		page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
+						 &page->dma, mem_flags);
+	else
+		page->vaddr = dma_alloc_pages(pool->dev, pool->allocation,
+					      &page->dma, pool->dir, mem_flags);
 	if (page->vaddr) {
 #ifdef	DMAPOOL_DEBUG
 		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
@@ -251,20 +286,25 @@ static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
 #ifdef	DMAPOOL_DEBUG
 	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
-	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
+	if (pool->is_coherent)
+		dma_free_coherent(pool->dev, pool->allocation, page->vaddr,
+				  dma);
+	else
+		dma_free_pages(pool->dev, pool->allocation, page->vaddr, dma,
+			       pool->dir);
 	list_del(&page->page_list);
 	kfree(page);
 }
 
 /**
- * dma_pool_destroy - destroys a pool of dma memory blocks.
+ * dma_pool_exit - destroys a pool of dma memory blocks.
  * @pool: dma pool that will be destroyed
  * Context: !in_interrupt()
  *
- * Caller guarantees that no more memory from the pool is in use,
- * and that nothing will try to use the pool after this call.
+ * Caller guarantees that no more memory from the pool is in use, and that
+ * nothing will try to use the pool after this call.
  */
-void dma_pool_destroy(struct dma_pool *pool)
+void dma_pool_exit(struct dma_pool *pool)
 {
 	bool empty = false;
 
@@ -299,7 +339,20 @@ void dma_pool_destroy(struct dma_pool *pool)
 		} else
 			pool_free_page(pool, page);
 	}
+}
+EXPORT_SYMBOL(dma_pool_exit);
 
+/**
+ * dma_pool_destroy - destroys a pool of dma memory blocks.
+ * @pool: dma pool that will be destroyed
+ * Context: !in_interrupt()
+ *
+ * Caller guarantees that no more memory from the pool is in use,
+ * and that nothing will try to use the pool after this call.
+ */
+void dma_pool_destroy(struct dma_pool *pool)
+{
+	dma_pool_exit(pool);
 	kfree(pool);
 }
 EXPORT_SYMBOL(dma_pool_destroy);
-- 
2.28.0


WARNING: multiple messages have this Message-ID (diff)
From: Christoph Hellwig <hch-jcswGhMUV9g@public.gmane.org>
To: Mauro Carvalho Chehab
	<mchehab-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>,
	Thomas Bogendoerfer
	<tsbogend-I1c7kopa9pxLokYuJOExCg@public.gmane.org>,
	"James E.J. Bottomley"
	<James.Bottomley-d9PhHud1JfjCXq6kfMZ53/egYHeGw8Jk@public.gmane.org>,
	Joonyoung Shim
	<jy0922.shim-Sze3O3UU22JBDgjK7y7TUQ@public.gmane.org>,
	Seung-Woo Kim
	<sw0312.kim-Sze3O3UU22JBDgjK7y7TUQ@public.gmane.org>,
	Kyungmin Park
	<kyungmin.park-Sze3O3UU22JBDgjK7y7TUQ@public.gmane.org>,
	Ben Skeggs <bskeggs-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>,
	Pawel Osciak <pawel-FA/gS7QP4orQT0dZR+AlfA@public.gmane.org>,
	Marek Szyprowski
	<m.szyprowski-Sze3O3UU22JBDgjK7y7TUQ@public.gmane.org>,
	Matt Porter
	<mporter-XVmvHMARGAS8U2dJNN8I7kB+6BGkLq7r@public.gmane.org>,
	iommu-cunTk1MwBs9QetFLy7KEm3xJsTq8ys+cHZ5vskTnxNA@public.gmane.org
Cc: Tom Lendacky <thomas.lendacky-5C7GfCeVMHo@public.gmane.org>,
	alsa-devel-K7yf7f+aM1XWsZ/bQMPhNw@public.gmane.org,
	linux-samsung-soc-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	linux-ia64-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	linux-scsi-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	linux-parisc-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	linux-doc-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	nouveau-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	linux-nvme-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r@public.gmane.org,
	linux-mips-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	linux-mm-Bw31MaZKKs3YtjvyW6yDsg@public.gmane.org,
	netdev-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	linux-arm-kernel-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r@public.gmane.org,
	linux-media-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
Subject: [PATCH 26/28] dmapool: add dma_alloc_pages support
Date: Wed, 19 Aug 2020 08:55:53 +0200	[thread overview]
Message-ID: <20200819065555.1802761-27-hch@lst.de> (raw)
In-Reply-To: <20200819065555.1802761-1-hch-jcswGhMUV9g@public.gmane.org>

Add an new variant of a dmapool that uses non-coherent memory from
dma_alloc_pages.  Unlike the existing mempool_create this one
initialized a pool allocated by the caller to avoid a pointless extra
allocation.  At some point it might be worth to also switch the coherent
allocation over to a similar dma_pool_init_coherent helper, but that is
better done as a separate series including a few conversions.

Signed-off-by: Christoph Hellwig <hch-jcswGhMUV9g@public.gmane.org>
---
 include/linux/dmapool.h |  23 ++++-
 mm/dmapool.c            | 211 +++++++++++++++++++++++++---------------
 2 files changed, 154 insertions(+), 80 deletions(-)

diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index f632ecfb423840..1387525c4e52e8 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -11,6 +11,10 @@
 #ifndef LINUX_DMAPOOL_H
 #define	LINUX_DMAPOOL_H
 
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
 #include <linux/scatterlist.h>
 #include <asm/io.h>
 
@@ -18,11 +22,28 @@ struct device;
 
 #ifdef CONFIG_HAS_DMA
 
+struct dma_pool {		/* the pool */
+	struct list_head page_list;
+	spinlock_t lock;
+	size_t size;
+	struct device *dev;
+	size_t allocation;
+	size_t boundary;
+	bool is_coherent;
+	enum dma_data_direction dir;
+	char name[32];
+	struct list_head pools;
+};
+
 struct dma_pool *dma_pool_create(const char *name, struct device *dev, 
 			size_t size, size_t align, size_t allocation);
-
 void dma_pool_destroy(struct dma_pool *pool);
 
+int dma_pool_init(struct device *dev, struct dma_pool *pool, const char *name,
+		size_t size, size_t align, size_t boundary,
+		enum dma_data_direction dir);
+void dma_pool_exit(struct dma_pool *pool);
+
 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 		     dma_addr_t *handle);
 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
diff --git a/mm/dmapool.c b/mm/dmapool.c
index f9fb9bbd733e0f..c60a48b22c8d6a 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -6,10 +6,10 @@
  * Copyright 2007 Intel Corporation
  *   Author: Matthew Wilcox <willy-VuQAYsv1563Yd54FQh9/CA@public.gmane.org>
  *
- * This allocator returns small blocks of a given size which are DMA-able by
- * the given device.  It uses the dma_alloc_coherent page allocator to get
- * new pages, then splits them up into blocks of the required size.
- * Many older drivers still have their own code to do this.
+ * This allocator returns small blocks of a given size which are DMA-able by the
+ * given device.  It either uses the dma_alloc_coherent or the dma_alloc_pages
+ * allocator to get new pages, then splits them up into blocks of the required
+ * size.
  *
  * The current design of this allocator is fairly simple.  The pool is
  * represented by the 'struct dma_pool' which keeps a doubly-linked list of
@@ -39,17 +39,6 @@
 #define DMAPOOL_DEBUG 1
 #endif
 
-struct dma_pool {		/* the pool */
-	struct list_head page_list;
-	spinlock_t lock;
-	size_t size;
-	struct device *dev;
-	size_t allocation;
-	size_t boundary;
-	char name[32];
-	struct list_head pools;
-};
-
 struct dma_page {		/* cacheable header for 'allocation' bytes */
 	struct list_head page_list;
 	void *vaddr;
@@ -104,74 +93,40 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
 
 static DEVICE_ATTR(pools, 0444, show_pools, NULL);
 
-/**
- * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
- * @name: name of pool, for diagnostics
- * @dev: device that will be doing the DMA
- * @size: size of the blocks in this pool.
- * @align: alignment requirement for blocks; must be a power of two
- * @boundary: returned blocks won't cross this power of two boundary
- * Context: not in_interrupt()
- *
- * Given one of these pools, dma_pool_alloc()
- * may be used to allocate memory.  Such memory will all have "consistent"
- * DMA mappings, accessible by the device and its driver without using
- * cache flushing primitives.  The actual size of blocks allocated may be
- * larger than requested because of alignment.
- *
- * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
- * cross that size boundary.  This is useful for devices which have
- * addressing restrictions on individual DMA transfers, such as not crossing
- * boundaries of 4KBytes.
- *
- * Return: a dma allocation pool with the requested characteristics, or
- * %NULL if one can't be created.
- */
-struct dma_pool *dma_pool_create(const char *name, struct device *dev,
-				 size_t size, size_t align, size_t boundary)
+static int __dma_pool_init(struct device *dev, struct dma_pool *pool,
+		const char *name, size_t size, size_t align, size_t boundary)
 {
-	struct dma_pool *retval;
 	size_t allocation;
 	bool empty = false;
 
 	if (align == 0)
 		align = 1;
-	else if (align & (align - 1))
-		return NULL;
+	if (align & (align - 1))
+		return -EINVAL;
 
 	if (size == 0)
-		return NULL;
-	else if (size < 4)
-		size = 4;
-
-	size = ALIGN(size, align);
+		return -EINVAL;
+	size = ALIGN(min_t(size_t, size, 4), align);
 	allocation = max_t(size_t, size, PAGE_SIZE);
 
 	if (!boundary)
 		boundary = allocation;
-	else if ((boundary < size) || (boundary & (boundary - 1)))
-		return NULL;
-
-	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
-	if (!retval)
-		return retval;
-
-	strlcpy(retval->name, name, sizeof(retval->name));
-
-	retval->dev = dev;
-
-	INIT_LIST_HEAD(&retval->page_list);
-	spin_lock_init(&retval->lock);
-	retval->size = size;
-	retval->boundary = boundary;
-	retval->allocation = allocation;
-
-	INIT_LIST_HEAD(&retval->pools);
+	if (boundary < size || (boundary & (boundary - 1)))
+		return -EINVAL;
+
+	strlcpy(pool->name, name, sizeof(pool->name));
+	pool->dev = dev;
+	INIT_LIST_HEAD(&pool->page_list);
+	spin_lock_init(&pool->lock);
+	pool->size = size;
+	pool->boundary = boundary;
+	pool->allocation = allocation;
+	INIT_LIST_HEAD(&pool->pools);
 
 	/*
 	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
 	 * pools_reg_lock ensures that there is not a race between
-	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
+	 * __dma_pool_init() and dma_pool_exit() or within dma_pool_create()
 	 * when the first invocation of dma_pool_create() failed on
 	 * device_create_file() and the second assumes that it has been done (I
 	 * know it is a short window).
@@ -180,7 +135,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 	mutex_lock(&pools_lock);
 	if (list_empty(&dev->dma_pools))
 		empty = true;
-	list_add(&retval->pools, &dev->dma_pools);
+	list_add(&pool->pools, &dev->dma_pools);
 	mutex_unlock(&pools_lock);
 	if (empty) {
 		int err;
@@ -188,18 +143,94 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 		err = device_create_file(dev, &dev_attr_pools);
 		if (err) {
 			mutex_lock(&pools_lock);
-			list_del(&retval->pools);
+			list_del(&pool->pools);
 			mutex_unlock(&pools_lock);
 			mutex_unlock(&pools_reg_lock);
-			kfree(retval);
-			return NULL;
+			return err;
 		}
 	}
 	mutex_unlock(&pools_reg_lock);
-	return retval;
+	return 0;
+}
+
+/**
+ * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
+ * @name: name of pool, for diagnostics
+ * @dev: device that will be doing the DMA
+ * @size: size of the blocks in this pool.
+ * @align: alignment requirement for blocks; must be a power of two
+ * @boundary: returned blocks won't cross this power of two boundary
+ * Context: not in_interrupt()
+ *
+ * Given one of these pools, dma_pool_alloc()
+ * may be used to allocate memory.  Such memory will all have "consistent"
+ * DMA mappings, accessible by the device and its driver without using
+ * cache flushing primitives.  The actual size of blocks allocated may be
+ * larger than requested because of alignment.
+ *
+ * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
+ * cross that size boundary.  This is useful for devices which have
+ * addressing restrictions on individual DMA transfers, such as not crossing
+ * boundaries of 4KBytes.
+ *  Return: a dma allocation pool with the requested characteristics, or
+ * %NULL if one can't be created.
+ */
+struct dma_pool *dma_pool_create(const char *name, struct device *dev,
+				 size_t size, size_t align, size_t boundary)
+{
+	struct dma_pool *pool;
+
+	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, dev_to_node(dev));
+	if (!pool)
+		return NULL;
+	if (__dma_pool_init(dev, pool, name, size, align, boundary))
+		goto out_free_pool;
+	pool->is_coherent = true;
+	return pool;
+out_free_pool:
+	kfree(pool);
+	return NULL;
 }
 EXPORT_SYMBOL(dma_pool_create);
 
+/**
+ * dma_pool_init - initialize a pool DMA addressable memory
+ * @dev:	device that will be doing the DMA
+ * @pool:	pool to initialize
+ * @name:	name of pool, for diagnostics
+ * @size:	size of the blocks in this pool.
+ * @align:	alignment requirement for blocks; must be a power of two
+ * @boundary:	returned blocks won't cross this power of two boundary
+ * @dir:	DMA direction the allocations are going to be used for
+ *
+ * Context:	not in_interrupt()
+ *
+ * Given one of these pools, dma_pool_alloc() may be used to allocate memory.
+ * Such memory will have the same semantics as memory returned from
+ * dma_alloc_pages(), that is ownership needs to be transferred to and from the
+ * device.  The actual size of blocks allocated may be larger than requested
+ * because of alignment.
+ *
+ * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
+ * cross that size boundary.  This is useful for devices which have
+ * addressing restrictions on individual DMA transfers, such as not crossing
+ * boundaries of 4KBytes.
+ */
+int dma_pool_init(struct device *dev, struct dma_pool *pool, const char *name,
+		size_t size, size_t align, size_t boundary,
+		enum dma_data_direction dir)
+{
+	int ret;
+
+	ret = __dma_pool_init(dev, pool, name, size, align, boundary);
+	if (ret)
+		return ret;
+	pool->is_coherent = false;
+	pool->dir = dir;
+	return 0;
+}
+EXPORT_SYMBOL(dma_pool_init);
+
 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
 {
 	unsigned int offset = 0;
@@ -223,8 +254,12 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 	page = kmalloc(sizeof(*page), mem_flags);
 	if (!page)
 		return NULL;
-	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
-					 &page->dma, mem_flags);
+	if (pool->is_coherent)
+		page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
+						 &page->dma, mem_flags);
+	else
+		page->vaddr = dma_alloc_pages(pool->dev, pool->allocation,
+					      &page->dma, pool->dir, mem_flags);
 	if (page->vaddr) {
 #ifdef	DMAPOOL_DEBUG
 		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
@@ -251,20 +286,25 @@ static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
 #ifdef	DMAPOOL_DEBUG
 	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
-	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
+	if (pool->is_coherent)
+		dma_free_coherent(pool->dev, pool->allocation, page->vaddr,
+				  dma);
+	else
+		dma_free_pages(pool->dev, pool->allocation, page->vaddr, dma,
+			       pool->dir);
 	list_del(&page->page_list);
 	kfree(page);
 }
 
 /**
- * dma_pool_destroy - destroys a pool of dma memory blocks.
+ * dma_pool_exit - destroys a pool of dma memory blocks.
  * @pool: dma pool that will be destroyed
  * Context: !in_interrupt()
  *
- * Caller guarantees that no more memory from the pool is in use,
- * and that nothing will try to use the pool after this call.
+ * Caller guarantees that no more memory from the pool is in use, and that
+ * nothing will try to use the pool after this call.
  */
-void dma_pool_destroy(struct dma_pool *pool)
+void dma_pool_exit(struct dma_pool *pool)
 {
 	bool empty = false;
 
@@ -299,7 +339,20 @@ void dma_pool_destroy(struct dma_pool *pool)
 		} else
 			pool_free_page(pool, page);
 	}
+}
+EXPORT_SYMBOL(dma_pool_exit);
 
+/**
+ * dma_pool_destroy - destroys a pool of dma memory blocks.
+ * @pool: dma pool that will be destroyed
+ * Context: !in_interrupt()
+ *
+ * Caller guarantees that no more memory from the pool is in use,
+ * and that nothing will try to use the pool after this call.
+ */
+void dma_pool_destroy(struct dma_pool *pool)
+{
+	dma_pool_exit(pool);
 	kfree(pool);
 }
 EXPORT_SYMBOL(dma_pool_destroy);
-- 
2.28.0

  parent reply	other threads:[~2020-08-19  7:03 UTC|newest]

Thread overview: 553+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
     [not found] <CGME20200819065610eucas1p2fde88e81917071b1888e7cc01ba0f298@eucas1p2.samsung.com>
2020-08-19  6:55 ` a saner API for allocating DMA addressable pages Christoph Hellwig
2020-08-19  6:55   ` Christoph Hellwig
2020-08-19  6:55   ` Christoph Hellwig
2020-08-19  6:55   ` Christoph Hellwig
2020-08-19  6:55   ` Christoph Hellwig
2020-08-19  6:55   ` Christoph Hellwig
2020-08-19  6:55   ` Christoph Hellwig
2020-08-19  6:55   ` [PATCH 01/28] mm: turn alloc_pages into an inline function Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55   ` [PATCH 02/28] drm/exynos: stop setting DMA_ATTR_NON_CONSISTENT Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55   ` [PATCH 03/28] drm/nouveau/gk20a: " Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55   ` [PATCH 04/28] net/au1000-eth: stop using DMA_ATTR_NON_CONSISTENT Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55   ` [PATCH 05/28] media/v4l2: remove V4L2-FLAG-MEMORY-NON-CONSISTENT Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19 11:16     ` Tomasz Figa
2020-08-19 11:16       ` Tomasz Figa
2020-08-19 11:16       ` Tomasz Figa
2020-08-19 11:16       ` Tomasz Figa
2020-08-19 11:16       ` Tomasz Figa
2020-08-19 11:16       ` Tomasz Figa
2020-08-19 11:16       ` Tomasz Figa
2020-08-19 11:16       ` Tomasz Figa
2020-08-19 11:51       ` Robin Murphy
2020-08-19 11:51         ` Robin Murphy
2020-08-19 11:51         ` Robin Murphy
2020-08-19 11:51         ` Robin Murphy
2020-08-19 11:51         ` Robin Murphy
2020-08-19 11:51         ` Robin Murphy
2020-08-19 12:49         ` Tomasz Figa
2020-08-19 12:49           ` Tomasz Figa
2020-08-19 12:49           ` Tomasz Figa
2020-08-19 12:49           ` Tomasz Figa
2020-08-19 12:49           ` Tomasz Figa
2020-08-19 12:49           ` Tomasz Figa
2020-08-19 12:49           ` Tomasz Figa
2020-08-19 12:49           ` Tomasz Figa
2020-08-19 13:57           ` Christoph Hellwig
2020-08-19 13:57             ` Christoph Hellwig
2020-08-19 13:57             ` Christoph Hellwig
2020-08-19 13:57             ` Christoph Hellwig
2020-08-19 13:57             ` Christoph Hellwig
2020-08-19 13:57             ` Christoph Hellwig
2020-08-19 14:11             ` Tomasz Figa
2020-08-19 14:11               ` Tomasz Figa
2020-08-19 14:11               ` Tomasz Figa
2020-08-19 14:11               ` Tomasz Figa
2020-08-19 14:11               ` Tomasz Figa
2020-08-19 14:11               ` Tomasz Figa
2020-08-19 14:11               ` Tomasz Figa
2020-08-19 14:11               ` Tomasz Figa
2020-08-20  4:45               ` Christoph Hellwig
2020-08-20  4:45                 ` Christoph Hellwig
2020-08-20  4:45                 ` Christoph Hellwig
2020-08-20  4:45                 ` Christoph Hellwig
2020-08-20  4:45                 ` Christoph Hellwig
2020-08-20  4:45                 ` Christoph Hellwig
2020-08-20 10:09                 ` Tomasz Figa
2020-08-20 10:09                   ` Tomasz Figa
2020-08-20 10:09                   ` Tomasz Figa
2020-08-20 10:09                   ` Tomasz Figa
2020-08-20 10:09                   ` Tomasz Figa
2020-08-20 10:09                   ` Tomasz Figa
2020-08-20 10:09                   ` Tomasz Figa
2020-08-20 10:09                   ` Tomasz Figa
2020-08-20 16:51                   ` Christoph Hellwig
2020-08-20 16:51                     ` Christoph Hellwig
2020-08-20 16:51                     ` Christoph Hellwig
2020-08-20 16:51                     ` Christoph Hellwig
2020-08-20 16:51                     ` Christoph Hellwig
2020-08-20 16:51                     ` Christoph Hellwig
2020-08-19 14:07           ` Robin Murphy
2020-08-19 14:07             ` Robin Murphy
2020-08-19 14:07             ` Robin Murphy
2020-08-19 14:07             ` Robin Murphy
2020-08-19 14:07             ` Robin Murphy
2020-08-19 14:07             ` Robin Murphy
2020-08-19 14:22             ` Tomasz Figa
2020-08-19 14:22               ` Tomasz Figa
2020-08-19 14:22               ` Tomasz Figa
2020-08-19 14:22               ` Tomasz Figa
2020-08-19 14:22               ` Tomasz Figa
2020-08-19 14:22               ` Tomasz Figa
2020-08-19 14:22               ` Tomasz Figa
2020-08-19 14:22               ` Tomasz Figa
2020-08-20  4:52               ` Christoph Hellwig
2020-08-20  4:52                 ` Christoph Hellwig
2020-08-20  4:52                 ` Christoph Hellwig
2020-08-20  4:52                 ` Christoph Hellwig
2020-08-20  4:52                 ` Christoph Hellwig
2020-08-20  4:52                 ` Christoph Hellwig
2020-08-20  5:02             ` Christoph Hellwig
2020-08-20  5:02               ` Christoph Hellwig
2020-08-20  5:02               ` Christoph Hellwig
2020-08-20  5:02               ` Christoph Hellwig
2020-08-20  5:02               ` Christoph Hellwig
2020-08-20  5:02               ` Christoph Hellwig
2020-08-20 10:24               ` Tomasz Figa
2020-08-20 10:24                 ` Tomasz Figa
2020-08-20 10:24                 ` Tomasz Figa
2020-08-20 10:24                 ` Tomasz Figa
2020-08-20 10:24                 ` Tomasz Figa
2020-08-20 10:24                 ` Tomasz Figa
2020-08-20 10:24                 ` Tomasz Figa
2020-08-20 10:24                 ` Tomasz Figa
2020-08-20 16:52                 ` Christoph Hellwig
2020-08-20 16:52                   ` Christoph Hellwig
2020-08-20 16:52                   ` Christoph Hellwig
2020-08-20 16:52                   ` Christoph Hellwig
2020-08-20 16:52                   ` Christoph Hellwig
2020-08-20 16:52                   ` Christoph Hellwig
2020-08-20 17:41                   ` Tomasz Figa
2020-08-20 17:41                     ` Tomasz Figa
2020-08-20 17:41                     ` Tomasz Figa
2020-08-20 17:41                     ` Tomasz Figa
2020-08-20 17:41                     ` Tomasz Figa
2020-08-20 17:41                     ` Tomasz Figa
2020-08-20 17:41                     ` Tomasz Figa
2020-08-20 17:41                     ` Tomasz Figa
2020-08-19 13:54       ` Christoph Hellwig
2020-08-19 13:54         ` Christoph Hellwig
2020-08-19 13:54         ` Christoph Hellwig
2020-08-19 13:54         ` Christoph Hellwig
2020-08-19 13:54         ` Christoph Hellwig
2020-08-19 13:54         ` Christoph Hellwig
2020-08-19 13:54         ` Christoph Hellwig
2020-08-19 13:57         ` Tomasz Figa
2020-08-19 13:57           ` Tomasz Figa
2020-08-19 13:57           ` Tomasz Figa
2020-08-19 13:57           ` Tomasz Figa
2020-08-19 13:57           ` Tomasz Figa
2020-08-19 13:57           ` Tomasz Figa
2020-08-19 13:57           ` Tomasz Figa
2020-08-19 13:57           ` Tomasz Figa
2020-08-20  4:43           ` Christoph Hellwig
2020-08-20  4:43             ` Christoph Hellwig
2020-08-20  4:43             ` Christoph Hellwig
2020-08-20  4:43             ` Christoph Hellwig
2020-08-20  4:43             ` Christoph Hellwig
2020-08-20  4:43             ` Christoph Hellwig
2020-08-20  4:43             ` Christoph Hellwig
2020-08-20  5:20             ` Christoph Hellwig
2020-08-20  5:20               ` Christoph Hellwig
2020-08-20  5:20               ` Christoph Hellwig
2020-08-20  5:20               ` Christoph Hellwig
2020-08-20  5:20               ` Christoph Hellwig
2020-08-20  5:20               ` Christoph Hellwig
2020-08-20  5:20               ` Christoph Hellwig
2020-08-20 10:05               ` Tomasz Figa
2020-08-20 10:05                 ` Tomasz Figa
2020-08-20 10:05                 ` Tomasz Figa
2020-08-20 10:05                 ` Tomasz Figa
2020-08-20 10:05                 ` Tomasz Figa
2020-08-20 10:05                 ` Tomasz Figa
2020-08-20 10:05                 ` Tomasz Figa
2020-08-20 10:05                 ` Tomasz Figa
2020-08-20 16:54                 ` Christoph Hellwig
2020-08-20 16:54                   ` Christoph Hellwig
2020-08-20 16:54                   ` Christoph Hellwig
2020-08-20 16:54                   ` Christoph Hellwig
2020-08-20 16:54                   ` Christoph Hellwig
2020-08-20 16:54                   ` Christoph Hellwig
2020-08-20 16:54                   ` Christoph Hellwig
2020-08-20 17:33                   ` Tomasz Figa
2020-08-20 17:33                     ` Tomasz Figa
2020-08-20 17:33                     ` Tomasz Figa
2020-08-20 17:33                     ` Tomasz Figa
2020-08-20 17:33                     ` Tomasz Figa
2020-08-20 17:33                     ` Tomasz Figa
2020-08-20 17:33                     ` Tomasz Figa
2020-08-20 17:33                     ` Tomasz Figa
2020-09-01 11:06                     ` Christoph Hellwig
2020-09-01 11:06                       ` Christoph Hellwig
2020-09-01 11:06                       ` Christoph Hellwig
2020-09-01 11:06                       ` Christoph Hellwig
2020-09-01 11:06                       ` Christoph Hellwig
2020-09-01 11:06                       ` Christoph Hellwig
2020-09-01 11:06                       ` Christoph Hellwig
2020-09-01 15:02                       ` Tomasz Figa
2020-09-01 15:02                         ` Tomasz Figa
2020-09-01 15:02                         ` Tomasz Figa
2020-09-01 15:02                         ` Tomasz Figa
2020-09-01 15:02                         ` Tomasz Figa
2020-09-01 15:02                         ` Tomasz Figa
2020-09-01 15:02                         ` Tomasz Figa
2020-09-01 15:02                         ` Tomasz Figa
2020-09-08 21:58                         ` Tomasz Figa
2020-09-08 22:09                           ` Tomasz Figa
2020-09-10  9:49                           ` Sergey Senozhatsky
2020-09-10  9:57                             ` Hans Verkuil
2020-09-10 10:14                               ` Sergey Senozhatsky
2020-09-10 10:23                                 ` Hans Verkuil
2020-09-10 14:48                                   ` Sergey Senozhatsky
2020-09-10 15:38                                     ` Sergey Senozhatsky
2020-08-19  6:55   ` [PATCH 06/28] lib82596: move DMA allocation into the callers of i82596_probe Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-09-01 13:29     ` Thomas Bogendoerfer
2020-09-01 13:29       ` Thomas Bogendoerfer
2020-09-01 13:29       ` Thomas Bogendoerfer
2020-09-01 13:29       ` Thomas Bogendoerfer
2020-09-01 13:29       ` Thomas Bogendoerfer
2020-09-01 13:29       ` Thomas Bogendoerfer
2020-09-01 13:29       ` Thomas Bogendoerfer
2020-08-19  6:55   ` [PATCH 07/28] 53c700: improve non-coherent DMA handling Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-09-01 14:52     ` James Bottomley
2020-09-01 14:52       ` James Bottomley
2020-09-01 14:52       ` James Bottomley
2020-09-01 14:52       ` James Bottomley
2020-09-01 14:52       ` James Bottomley
2020-09-01 14:52       ` James Bottomley
2020-09-01 14:52       ` James Bottomley
2020-09-01 14:52       ` James Bottomley
2020-09-01 15:05       ` Matthew Wilcox
2020-09-01 15:05         ` Matthew Wilcox
2020-09-01 15:05         ` Matthew Wilcox
2020-09-01 15:05         ` Matthew Wilcox
2020-09-01 15:05         ` Matthew Wilcox
2020-09-01 15:05         ` Matthew Wilcox
2020-09-01 15:05         ` Matthew Wilcox
2020-09-01 15:22         ` James Bottomley
2020-09-01 15:22           ` James Bottomley
2020-09-01 15:22           ` James Bottomley
2020-09-01 15:22           ` James Bottomley
2020-09-01 15:22           ` James Bottomley
2020-09-01 15:22           ` James Bottomley
2020-09-01 15:22           ` James Bottomley
2020-09-01 15:22           ` James Bottomley
2020-09-01 16:21           ` Helge Deller
2020-09-01 16:21             ` Helge Deller
2020-09-01 16:21             ` Helge Deller
2020-09-01 16:21             ` Helge Deller
2020-09-01 16:21             ` Helge Deller
2020-09-01 16:21             ` Helge Deller
2020-09-01 16:21             ` Helge Deller
2020-09-01 16:41             ` Helge Deller
2020-09-01 16:41               ` Helge Deller
2020-09-01 16:41               ` Helge Deller
2020-09-01 16:41               ` Helge Deller
2020-09-01 16:41               ` Helge Deller
2020-09-01 16:41               ` Helge Deller
2020-09-01 16:41               ` Helge Deller
2020-09-01 16:53               ` Matthew Wilcox
2020-09-01 16:53                 ` Matthew Wilcox
2020-09-01 16:53                 ` Matthew Wilcox
2020-09-01 16:53                 ` Matthew Wilcox
2020-09-01 16:53                 ` Matthew Wilcox
2020-09-01 16:53                 ` Matthew Wilcox
2020-09-01 16:53                 ` Matthew Wilcox
2020-09-02 15:00                 ` Helge Deller
2020-09-02 15:00                   ` Helge Deller
2020-09-02 15:00                   ` Helge Deller
2020-09-02 15:00                   ` Helge Deller
2020-09-02 15:00                   ` Helge Deller
2020-09-02 15:00                   ` Helge Deller
2020-09-02 15:00                   ` Helge Deller
2020-08-19  6:55   ` [PATCH 08/28] MIPS: make dma_sync_*_for_cpu a little less overzealous Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-09-01 13:53     ` Thomas Bogendoerfer
2020-09-01 13:53       ` Thomas Bogendoerfer
2020-09-01 13:53       ` Thomas Bogendoerfer
2020-09-01 13:53       ` Thomas Bogendoerfer
2020-09-01 13:53       ` Thomas Bogendoerfer
2020-09-01 13:53       ` Thomas Bogendoerfer
2020-09-01 13:53       ` Thomas Bogendoerfer
2020-08-19  6:55   ` [PATCH 09/28] MIPS/jazzdma: remove the unused vdma_remap function Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-09-01 13:49     ` Thomas Bogendoerfer
2020-09-01 13:49       ` Thomas Bogendoerfer
2020-09-01 13:49       ` Thomas Bogendoerfer
2020-09-01 13:49       ` Thomas Bogendoerfer
2020-09-01 13:49       ` Thomas Bogendoerfer
2020-09-01 13:49       ` Thomas Bogendoerfer
2020-09-01 13:49       ` Thomas Bogendoerfer
2020-08-19  6:55   ` [PATCH 10/28] MIPS/jazzdma: decouple from dma-direct Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-08-19  6:55     ` Christoph Hellwig
2020-09-01 13:49     ` Thomas Bogendoerfer
2020-09-01 13:49       ` Thomas Bogendoerfer
2020-09-01 13:49       ` Thomas Bogendoerfer
2020-09-01 13:49       ` Thomas Bogendoerfer
2020-09-01 13:49       ` Thomas Bogendoerfer
2020-09-01 13:49       ` Thomas Bogendoerfer
2020-09-01 13:49       ` Thomas Bogendoerfer
2020-08-19  6:55   ` [PATCH 11/28] dma-mapping: add (back) arch_dma_mark_clean for ia64 Christoph Hellwig
2020-08-19  6:55   ` [PATCH 12/28] dma-direct: remove dma_direct_{alloc,free}_pages Christoph Hellwig
2020-08-19  6:55   ` [PATCH 13/28] dma-direct: lift gfp_t manipulation out of__dma_direct_alloc_pages Christoph Hellwig
2020-08-19  6:55   ` [PATCH 14/28] dma-direct: use phys_to_dma_direct in dma_direct_alloc Christoph Hellwig
2020-08-19  6:55   ` [PATCH 15/28] dma-direct: remove __dma_to_phys Christoph Hellwig
2020-08-19  6:55   ` [PATCH 16/28] dma-direct: rename and cleanup __phys_to_dma Christoph Hellwig
2020-08-19  6:55   ` [PATCH 17/28] dma-mapping: move dma_common_{mmap,get_sgtable} out of mapping.c Christoph Hellwig
2020-08-19  6:55   ` [PATCH 18/28] dma-mapping: move the dma_declare_coherent_memory documentation Christoph Hellwig
2020-08-19  6:55   ` [PATCH 19/28] dma-mapping: replace DMA_ATTR_NON_CONSISTENT with dma_{alloc,free}_pages Christoph Hellwig
2020-08-19 15:03     ` Tomasz Figa
2020-08-20  5:15       ` Christoph Hellwig
2020-08-19 16:46     ` kernel test robot
2020-08-19  6:55   ` [PATCH 20/28] sgiwd93: convert from dma_cache_sync to dma_sync_single_for_device Christoph Hellwig
2020-08-19  6:55   ` [PATCH 21/28] hal2: " Christoph Hellwig
2020-08-19  6:55   ` [PATCH 22/28] sgiseeq: " Christoph Hellwig
2020-09-01 15:22     ` Thomas Bogendoerfer
2020-09-01 17:12       ` Thomas Bogendoerfer
2020-09-01 17:16         ` Christoph Hellwig
2020-09-01 17:38           ` Thomas Bogendoerfer
2020-09-02 21:38             ` Thomas Bogendoerfer
2020-09-03  8:42               ` Christoph Hellwig
2020-09-03  8:43             ` Christoph Hellwig
2020-09-03  8:46               ` Christoph Hellwig
2020-08-19  6:55   ` [PATCH 23/28] lib82596: " Christoph Hellwig
2020-08-19  6:55   ` [PATCH 24/28] 53c700: " Christoph Hellwig
2020-08-19  6:55   ` [PATCH 25/28] dma-mapping: remove dma_cache_sync Christoph Hellwig
2020-08-19  6:55   ` Christoph Hellwig [this message]
2020-08-19  6:55   ` [PATCH 27/28] nvme-pci: fix PRP pool size Christoph Hellwig
2020-08-19  6:55   ` [PATCH 28/28] nvme-pci: use dma_alloc_pages backed dmapools Christoph Hellwig
2020-08-25 11:30   ` a saner API for allocating DMA addressable pages Marek Szyprowski
2020-08-25 13:26     ` Christoph Hellwig
2020-08-29  9:46   ` Helge Deller

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
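
  For example, a minimal sketch assuming the file was saved as ./mbox and
  the mutt mail client is available (any mbox-capable client works the
  same way):

  mutt -f ./mbox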

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20200819065555.1802761-27-hch@lst.de \
    --to=hch@lst.de \
    --cc=James.Bottomley@HansenPartnership.com \
    --cc=alsa-devel@alsa-project.org \
    --cc=bskeggs@redhat.com \
    --cc=iommu@lists.linux-foundation.org \
    --cc=jy0922.shim@samsung.com \
    --cc=kyungmin.park@samsung.com \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-doc@vger.kernel.org \
    --cc=linux-ia64@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-media@vger.kernel.org \
    --cc=linux-mips@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux-nvme@lists.infradead.org \
    --cc=linux-parisc@vger.kernel.org \
    --cc=linux-samsung-soc@vger.kernel.org \
    --cc=linux-scsi@vger.kernel.org \
    --cc=m.szyprowski@samsung.com \
    --cc=mchehab@kernel.org \
    --cc=mporter@kernel.crashing.org \
    --cc=netdev@vger.kernel.org \
    --cc=nouveau@lists.freedesktop.org \
    --cc=pawel@osciak.com \
    --cc=sw0312.kim@samsung.com \
    --cc=thomas.lendacky@amd.com \
    --cc=tsbogend@alpha.franken.de \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html
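
  If git send-email is not yet configured, a minimal SMTP setup sketch
  (smtp.example.com, port 587, and you@example.com are placeholder
  values; substitute your own provider's settings):

  git config --global sendemail.smtpserver smtp.example.com
  git config --global sendemail.smtpserverport 587
  git config --global sendemail.smtpencryption tls
  git config --global sendemail.smtpuser you@example.com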

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
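
  As an illustrative sketch only (mail clients differ, and the angle
  brackets may need to be percent-encoded), such a link for this message
  could look like:

  mailto:hch@lst.de?In-Reply-To=<20200819065555.1802761-27-hch@lst.de>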

Be sure your reply has a Subject: header at the top and a blank line
before the message body.