From: Baoquan He <bhe@redhat.com>
To: Christoph Hellwig <hch@lst.de>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	akpm@linux-foundation.org, cl@linux.com, 42.hyeyoo@gmail.com,
	penberg@kernel.org, rientjes@google.com, iamjoonsoo.kim@lge.com,
	vbabka@suse.cz, David.Laight@aculab.com, david@redhat.com,
	herbert@gondor.apana.org.au, davem@davemloft.net,
	linux-crypto@vger.kernel.org, steffen.klassert@secunet.com,
	netdev@vger.kernel.org, hca@linux.ibm.com, gor@linux.ibm.com,
	agordeev@linux.ibm.com, borntraeger@linux.ibm.com,
	svens@linux.ibm.com, linux-s390@vger.kernel.org,
	michael@walle.cc, linux-i2c@vger.kernel.org, wsa@kernel.org
Subject: [PATCH 2/2] kernel/dma: rename dma_alloc_direct and dma_map_direct
Date: Tue, 22 Feb 2022 21:42:22 +0800
Message-ID: <YhToPpeuTqdQgC80@MiWiFi-R3L-srv>
In-Reply-To: <20220222131120.GB10093@lst.de>

In the old DMA mapping model, a coherent mapping uses dma_alloc_coherent()
to allocate the DMA buffer and map it in one step, while a streaming
mapping can only get memory from the slab or buddy allocator and then map
it with dma_map_single(). Under that model, dma_alloc_direct() checks
whether coherent DMA can use the direct mapping, and dma_map_direct()
checks whether streaming DMA can use the direct mapping.
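
For reference, a minimal sketch of the two classic styles (illustrative
only, not part of this patch; "dev" and the 4 KiB buffer size are
hypothetical, standing in for a driver's struct device and payload):

	/* Coherent mapping: allocation and mapping in a single call. */
	dma_addr_t dma_handle;
	void *buf = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
	/* ... CPU and device may both access buf, no explicit syncs ... */
	dma_free_coherent(dev, SZ_4K, buf, dma_handle);

	/* Streaming mapping: allocate from slab/buddy, then map explicitly. */
	void *data = kmalloc(SZ_4K, GFP_KERNEL);
	dma_addr_t addr = dma_map_single(dev, data, SZ_4K, DMA_TO_DEVICE);
	/* check dma_mapping_error(dev, addr) before handing addr to the device */
	dma_unmap_single(dev, addr, SZ_4K, DMA_TO_DEVICE);
	kfree(data);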

However, several new APIs have since been added for streaming mappings,
e.g. dma_alloc_pages(). Like dma_alloc_coherent(), these APIs handle both
buffer allocation and mapping, yet they are streaming interfaces and must
be gated on the streaming mask rather than the coherent one. So rename
both helpers to reflect which mask each one actually checks, and switch
the call sites that back streaming-style APIs to the streaming check, to
avoid confusion:

       dma_alloc_direct()  ==>  dma_coherent_direct()
       dma_map_direct()    ==>  dma_streaming_direct()
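
For context, dma_alloc_pages() also allocates and maps in one call, but it
follows streaming ownership rules, so explicit dma_sync_single_for_{cpu,
device} calls are still needed when the CPU and device alternate access.
A hedged sketch, again assuming a driver-provided "dev":

	/* New-style streaming allocation: allocate + map in one call. */
	dma_addr_t dma_handle;
	struct page *page = dma_alloc_pages(dev, SZ_4K, &dma_handle,
					    DMA_BIDIRECTIONAL, GFP_KERNEL);
	/* ... hand dma_handle to the device for DMA ... */
	dma_sync_single_for_cpu(dev, dma_handle, SZ_4K, DMA_BIDIRECTIONAL);
	/* ... CPU reads/writes the page contents ... */
	dma_free_pages(dev, SZ_4K, page, dma_handle, DMA_BIDIRECTIONAL);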

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 kernel/dma/mapping.c | 44 ++++++++++++++++++++++----------------------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index e66847aeac67..2835b08e96c6 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -127,13 +127,13 @@ static bool dma_go_direct(struct device *dev, dma_addr_t mask,
  * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
  * enough.
  */
-static inline bool dma_alloc_direct(struct device *dev,
+static inline bool dma_coherent_direct(struct device *dev,
 		const struct dma_map_ops *ops)
 {
 	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
 }
 
-static inline bool dma_map_direct(struct device *dev,
+static inline bool dma_streaming_direct(struct device *dev,
 		const struct dma_map_ops *ops)
 {
 	return dma_go_direct(dev, *dev->dma_mask, ops);
@@ -151,7 +151,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 	if (WARN_ON_ONCE(!dev->dma_mask))
 		return DMA_MAPPING_ERROR;
 
-	if (dma_map_direct(dev, ops) ||
+	if (dma_streaming_direct(dev, ops) ||
 	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
 		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
 	else
@@ -168,7 +168,7 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (dma_map_direct(dev, ops) ||
+	if (dma_streaming_direct(dev, ops) ||
 	    arch_dma_unmap_page_direct(dev, addr + size))
 		dma_direct_unmap_page(dev, addr, size, dir, attrs);
 	else if (ops->unmap_page)
@@ -188,7 +188,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 	if (WARN_ON_ONCE(!dev->dma_mask))
 		return 0;
 
-	if (dma_map_direct(dev, ops) ||
+	if (dma_streaming_direct(dev, ops) ||
 	    arch_dma_map_sg_direct(dev, sg, nents))
 		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
 	else
@@ -277,7 +277,7 @@ void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
 
 	BUG_ON(!valid_dma_direction(dir));
 	debug_dma_unmap_sg(dev, sg, nents, dir);
-	if (dma_map_direct(dev, ops) ||
+	if (dma_streaming_direct(dev, ops) ||
 	    arch_dma_unmap_sg_direct(dev, sg, nents))
 		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
 	else if (ops->unmap_sg)
@@ -296,7 +296,7 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
 	if (WARN_ON_ONCE(!dev->dma_mask))
 		return DMA_MAPPING_ERROR;
 
-	if (dma_map_direct(dev, ops))
+	if (dma_streaming_direct(dev, ops))
 		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
 	else if (ops->map_resource)
 		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
@@ -312,7 +312,7 @@ void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
+	if (!dma_streaming_direct(dev, ops) && ops->unmap_resource)
 		ops->unmap_resource(dev, addr, size, dir, attrs);
 	debug_dma_unmap_resource(dev, addr, size, dir);
 }
@@ -324,7 +324,7 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (dma_map_direct(dev, ops))
+	if (dma_streaming_direct(dev, ops))
 		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
 	else if (ops->sync_single_for_cpu)
 		ops->sync_single_for_cpu(dev, addr, size, dir);
@@ -338,7 +338,7 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (dma_map_direct(dev, ops))
+	if (dma_streaming_direct(dev, ops))
 		dma_direct_sync_single_for_device(dev, addr, size, dir);
 	else if (ops->sync_single_for_device)
 		ops->sync_single_for_device(dev, addr, size, dir);
@@ -352,7 +352,7 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (dma_map_direct(dev, ops))
+	if (dma_streaming_direct(dev, ops))
 		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
 	else if (ops->sync_sg_for_cpu)
 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
@@ -366,7 +366,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (dma_map_direct(dev, ops))
+	if (dma_streaming_direct(dev, ops))
 		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
 	else if (ops->sync_sg_for_device)
 		ops->sync_sg_for_device(dev, sg, nelems, dir);
@@ -391,7 +391,7 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
-	if (dma_alloc_direct(dev, ops))
+	if (dma_streaming_direct(dev, ops))
 		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
 				size, attrs);
 	if (!ops->get_sgtable)
@@ -430,7 +430,7 @@ bool dma_can_mmap(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
-	if (dma_alloc_direct(dev, ops))
+	if (dma_coherent_direct(dev, ops))
 		return dma_direct_can_mmap(dev);
 	return ops->mmap != NULL;
 }
@@ -455,7 +455,7 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
-	if (dma_alloc_direct(dev, ops))
+	if (dma_coherent_direct(dev, ops))
 		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
 				attrs);
 	if (!ops->mmap)
@@ -468,7 +468,7 @@ u64 dma_get_required_mask(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
-	if (dma_alloc_direct(dev, ops))
+	if (dma_streaming_direct(dev, ops))
 		return dma_direct_get_required_mask(dev);
 	if (ops->get_required_mask)
 		return ops->get_required_mask(dev);
@@ -499,7 +499,7 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	/* let the implementation decide on the zone to allocate from: */
 	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 
-	if (dma_alloc_direct(dev, ops))
+	if (dma_coherent_direct(dev, ops))
 		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
 	else if (ops->alloc)
 		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
@@ -531,7 +531,7 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		return;
 
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	if (dma_alloc_direct(dev, ops))
+	if (dma_coherent_direct(dev, ops))
 		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
 	else if (ops->free)
 		ops->free(dev, size, cpu_addr, dma_handle, attrs);
@@ -550,7 +550,7 @@ static struct page *__dma_alloc_pages(struct device *dev, size_t size,
         gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 
 	size = PAGE_ALIGN(size);
-	if (dma_alloc_direct(dev, ops))
+	if (dma_streaming_direct(dev, ops))
 		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
 	if (!ops->alloc_pages)
 		return NULL;
@@ -574,7 +574,7 @@ static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	size = PAGE_ALIGN(size);
-	if (dma_alloc_direct(dev, ops))
+	if (dma_streaming_direct(dev, ops))
 		dma_direct_free_pages(dev, size, page, dma_handle, dir);
 	else if (ops->free_pages)
 		ops->free_pages(dev, size, page, dma_handle, dir);
@@ -769,7 +769,7 @@ size_t dma_max_mapping_size(struct device *dev)
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	size_t size = SIZE_MAX;
 
-	if (dma_map_direct(dev, ops))
+	if (dma_streaming_direct(dev, ops))
 		size = dma_direct_max_mapping_size(dev);
 	else if (ops && ops->max_mapping_size)
 		size = ops->max_mapping_size(dev);
@@ -782,7 +782,7 @@ bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
-	if (dma_map_direct(dev, ops))
+	if (dma_streaming_direct(dev, ops))
 		return dma_direct_need_sync(dev, dma_addr);
 	return ops->sync_single_for_cpu || ops->sync_single_for_device;
 }
-- 
2.31.1

