From: Arnd Bergmann <arnd@kernel.org>
To: linux-kernel@vger.kernel.org
Cc: Arnd Bergmann <arnd@arndb.de>, Vineet Gupta <vgupta@kernel.org>,
	Russell King <linux@armlinux.org.uk>,
	Neil Armstrong <neil.armstrong@linaro.org>,
	Linus Walleij <linus.walleij@linaro.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>, Guo Ren <guoren@kernel.org>,
	Brian Cain <bcain@quicinc.com>,
	Geert Uytterhoeven <geert@linux-m68k.org>,
	Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Dinh Nguyen <dinguyen@kernel.org>,
	Stafford Horne <shorne@gmail.com>, Helge Deller <deller@gmx.de>,
	Michael Ellerman <mpe@ellerman.id.au>,
	Christophe Leroy <christophe.leroy@csgroup.eu>,
	Paul Walmsley <paul.walmsley@sifive.com>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	Rich Felker <dalias@libc.org>,
	John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>,
	"David S. Miller" <davem@davemloft.net>,
	Max Filippov <jcmvbkbc@gmail.com>, Christoph Hellwig <hch@lst.de>,
	Robin Murphy <robin.murphy@arm.com>,
	Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>,
	Conor Dooley <conor.dooley@microchip.com>,
	linux-snps-arc@lists.infradead.org,
	linux-arm-kernel@lists.infradead.org, linux-oxnas@groups.io,
	linux-csky@vger.kernel.org, linux-hexagon@vger.kernel.org,
	linux-m68k@lists.linux-m68k.org, linux-mips@vger.kernel.org,
	linux-openrisc@vger.kernel.org, linux-parisc@vger.kernel.org,
	linuxppc-dev@lists.ozlabs.org, linux-riscv@lists.infradead.org,
	linux-sh@vger.kernel.org, sparclinux@vger.kernel.org,
	linux-xtensa@linux-xtensa.org
Subject: [PATCH 17/21] ARM: dma-mapping: use arch_sync_dma_for_{device,cpu}() internally
Date: Mon, 27 Mar 2023 14:13:13 +0200
Message-ID: <20230327121317.4081816-18-arnd@kernel.org>
In-Reply-To: <20230327121317.4081816-1-arnd@kernel.org>

From: Arnd Bergmann <arnd@arndb.de>

The arm-specific iommu code in dma-mapping.c uses the page+offset based
__dma_page_cpu_to_dev()/__dma_page_dev_to_cpu() helpers in place of the
phys_addr_t based arch_sync_dma_for_device()/arch_sync_dma_for_cpu()
wrappers around them.
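
For reference, the two interfaces differ only in how they address the
target memory (prototypes as they appear in this file):

	static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
		size_t size, enum dma_data_direction dir);

	void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);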

In order to be able to move the latter set of functions into common
code, change the iommu implementation to use them directly and remove
the internal helpers as a separate interface.

As page+offset and phys_addr_t are equivalent representations, but are
used in different parts of the code here, this allows removing some of
the conversions while adding them elsewhere.
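
The conversion in either direction is trivial; a minimal sketch, using
only helpers that already appear in this file:

	/* page+offset -> phys_addr_t */
	phys_addr_t paddr = page_to_phys(page) + offset;

	/* phys_addr_t -> page+offset */
	struct page *page = phys_to_page(paddr);
	unsigned long offset = paddr & (PAGE_SIZE - 1);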

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
---
 arch/arm/mm/dma-mapping.c | 93 ++++++++++++++-------------------------
 1 file changed, 33 insertions(+), 60 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 8bc01071474a..ce4b74f34a58 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -622,16 +622,14 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	kfree(buf);
 }
 
-static void dma_cache_maint_page(struct page *page, unsigned long offset,
+static void dma_cache_maint(phys_addr_t paddr,
 	size_t size, enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
 {
-	unsigned long pfn;
+	unsigned long pfn = PFN_DOWN(paddr);
+	unsigned long offset = paddr % PAGE_SIZE;
 	size_t left = size;
 
-	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
-	offset %= PAGE_SIZE;
-
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
 	 * pages.  But we still need to process highmem pages individually.
@@ -641,8 +639,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	do {
 		size_t len = left;
 		void *vaddr;
-
-		page = pfn_to_page(pfn);
+		struct page *page = pfn_to_page(pfn);
 
 		if (PageHighMem(page)) {
 			if (len + offset > PAGE_SIZE)
@@ -674,14 +671,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
  * Note: Drivers should NOT use this function directly.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	phys_addr_t paddr;
+	dma_cache_maint(paddr, size, dir, dmac_map_area);
 
-	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
-
-	paddr = page_to_phys(page) + off;
 	if (dir == DMA_FROM_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
 	} else {
@@ -690,34 +684,30 @@ static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 
-static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	phys_addr_t paddr = page_to_phys(page) + off;
-
 	/* FIXME: non-speculating: not required */
 	/* in any case, don't bother invalidating if DMA to device */
 	if (dir != DMA_TO_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
 
-		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+		dma_cache_maint(paddr, size, dir, dmac_unmap_area);
 	}
 
 	/*
 	 * Mark the D-cache clean for these pages to avoid extra flushing.
 	 */
 	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
-		unsigned long pfn;
+		unsigned long pfn = PFN_UP(paddr);
+		unsigned long off = paddr & (PAGE_SIZE - 1);
 		size_t left = size;
 
-		pfn = page_to_pfn(page) + off / PAGE_SIZE;
-		off %= PAGE_SIZE;
-		if (off) {
-			pfn++;
+		if (off)
 			left -= PAGE_SIZE - off;
-		}
+
 		while (left >= PAGE_SIZE) {
-			page = pfn_to_page(pfn++);
+			struct page *page = pfn_to_page(pfn++);
 			set_bit(PG_dcache_clean, &page->flags);
 			left -= PAGE_SIZE;
 		}
@@ -1204,7 +1194,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
 		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+			arch_sync_dma_for_device(phys + s->offset, s->length, dir);
 
 		prot = __dma_info_to_prot(dir, attrs);
 
@@ -1306,8 +1296,7 @@ static void arm_iommu_unmap_sg(struct device *dev,
 			__iommu_remove_mapping(dev, sg_dma_address(s),
 					       sg_dma_len(s));
 		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			__dma_page_dev_to_cpu(sg_page(s), s->offset,
-					      s->length, dir);
+			arch_sync_dma_for_cpu(sg_phys(s), s->length, dir);
 	}
 }
 
@@ -1329,7 +1318,7 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
 		return;
 
 	for_each_sg(sg, s, nents, i)
-		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+		arch_sync_dma_for_cpu(sg_phys(s), s->length, dir);
 
 }
 
@@ -1351,7 +1340,8 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
 		return;
 
 	for_each_sg(sg, s, nents, i)
-		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+		arch_sync_dma_for_device(page_to_phys(sg_page(s)) + s->offset,
+					 s->length, dir);
 }
 
 /**
@@ -1373,7 +1363,8 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	int ret, prot, len = PAGE_ALIGN(size + offset);
 
 	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+		arch_sync_dma_for_device(page_to_phys(page) + offset,
+					 size, dir);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_MAPPING_ERROR)
@@ -1406,7 +1397,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
+	phys_addr_t phys;
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
 
@@ -1414,8 +1405,8 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 		return;
 
 	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-		page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-		__dma_page_dev_to_cpu(page, offset, size, dir);
+		phys = iommu_iova_to_phys(mapping->domain, handle);
+		arch_sync_dma_for_cpu(phys, size, dir);
 	}
 
 	iommu_unmap(mapping->domain, iova, len);
@@ -1483,30 +1474,26 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
-	unsigned int offset = handle & ~PAGE_MASK;
+	phys_addr_t phys;
 
-	if (dev->dma_coherent || !iova)
+	if (dev->dma_coherent || !(handle & PAGE_MASK))
 		return;
 
-	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-	__dma_page_dev_to_cpu(page, offset, size, dir);
+	phys = iommu_iova_to_phys(mapping->domain, handle);
+	arch_sync_dma_for_cpu(phys, size, dir);
 }
 
 static void arm_iommu_sync_single_for_device(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
-	unsigned int offset = handle & ~PAGE_MASK;
+	phys_addr_t phys;
 
-	if (dev->dma_coherent || !iova)
+	if (dev->dma_coherent || !(handle & PAGE_MASK))
 		return;
 
-	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	phys = iommu_iova_to_phys(mapping->domain, handle);
+	arch_sync_dma_for_device(phys, size, dir);
 }
 
 static const struct dma_map_ops iommu_ops = {
@@ -1789,20 +1776,6 @@ void arch_teardown_dma_ops(struct device *dev)
 	set_dma_ops(dev, NULL);
 }
 
-void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
-			      size, dir);
-}
-
-void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
-			      size, dir);
-}
-
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs)
 {
-- 
2.39.2


+	phys = iommu_iova_to_phys(mapping->domain, handle);
+	arch_sync_dma_for_device(phys, size, dir);
 }
 
 static const struct dma_map_ops iommu_ops = {
@@ -1789,20 +1776,6 @@ void arch_teardown_dma_ops(struct device *dev)
 	set_dma_ops(dev, NULL);
 }
 
-void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
-			      size, dir);
-}
-
-void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
-			      size, dir);
-}
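
With the page-based wrappers gone, arch_sync_dma_for_{device,cpu}() are
the only remaining cache maintenance entry points, which is the interface
the generic dma-direct code already calls. Roughly, paraphrasing the
helpers in kernel/dma/direct.h (simplified sketch, omitting the swiotlb
handling):

	static inline void dma_direct_sync_single_for_device(struct device *dev,
			dma_addr_t addr, size_t size, enum dma_data_direction dir)
	{
		phys_addr_t paddr = dma_to_phys(dev, addr);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, size, dir);
	}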

Thread overview: 456+ messages
2023-03-27 12:12 [PATCH 00/21] dma-mapping: unify support for cache flushes Arnd Bergmann
2023-03-27 12:12 ` [PATCH 01/21] openrisc: dma-mapping: flush bidirectional mappings Arnd Bergmann
2023-03-27 12:12 ` [PATCH 02/21] xtensa: dma-mapping: use normal cache invalidation rules Arnd Bergmann
2023-03-27 15:42   ` Max Filippov
2023-03-27 12:12 ` [PATCH 03/21] sparc32: flush caches in dma_sync_*for_device Arnd Bergmann
2023-03-27 12:13 ` [PATCH 04/21] microblaze: dma-mapping: skip extra DMA flushes Arnd Bergmann
2023-03-27 12:13 ` [PATCH 05/21] powerpc: dma-mapping: split out cache operation logic Arnd Bergmann
2023-03-27 12:13 ` [PATCH 06/21] powerpc: dma-mapping: minimize for_cpu flushing Arnd Bergmann
2023-03-27 12:56   ` Christophe Leroy
2023-03-27 13:02     ` Arnd Bergmann
2023-03-27 12:13 ` [PATCH 07/21] powerpc: dma-mapping: always clean cache in _for_device() op Arnd Bergmann
2023-03-27 12:13 ` [PATCH 08/21] riscv: dma-mapping: only invalidate after DMA, not flush Arnd Bergmann
2023-03-29 20:48   ` Conor Dooley
2023-03-30  7:10     ` Arnd Bergmann
2023-03-29 21:51   ` Jessica Clarke
2023-03-30 12:59   ` Lad, Prabhakar
2023-04-19 14:22   ` Palmer Dabbelt
2023-03-27 12:13 ` [PATCH 09/21] riscv: dma-mapping: skip invalidation before bidirectional DMA Arnd Bergmann
2023-03-29 20:16   ` Conor Dooley
2023-03-30 13:26   ` Lad, Prabhakar
2023-04-19 14:22   ` Palmer Dabbelt
2023-05-05  5:47   ` Guo Ren
2023-05-05 13:18     ` Arnd Bergmann
2023-05-06  7:25       ` Guo Ren
2023-05-06  7:53         ` Arnd Bergmann
2023-03-27 12:13 ` [PATCH 10/21] csky: dma-mapping: skip invalidating before DMA from device Arnd Bergmann
2023-03-27 13:37   ` Guo Ren
2023-03-27 12:13 ` [PATCH 11/21] mips: dma-mapping: skip invalidating before bidirectional DMA Arnd Bergmann
2023-03-27 12:13 ` [PATCH 12/21] mips: dma-mapping: split out cache operation logic Arnd Bergmann
2023-03-27 12:13 ` [PATCH 13/21] arc: dma-mapping: skip invalidating before bidirectional DMA Arnd Bergmann
2023-04-02  6:52   ` Vineet Gupta
2023-04-04  8:27     ` Shahab Vahedi
2023-04-06  9:01     ` Shahab Vahedi
2023-03-27 12:13 ` [PATCH 14/21] parisc: dma-mapping: use regular flush/invalidate ops Arnd Bergmann
2023-03-27 12:13 ` [PATCH 15/21] ARM: dma-mapping: always invalidate WT caches before DMA Arnd Bergmann
2023-03-31  9:01   ` Linus Walleij
2023-03-31  9:07   ` Russell King (Oracle)
2023-03-31  9:35     ` Russell King (Oracle)
2023-03-31 10:38       ` Arnd Bergmann
2023-03-31 11:01         ` David Laight
2023-03-31 11:08         ` Russell King (Oracle)
2023-03-31 12:32           ` Arnd Bergmann
2023-03-27 12:13 ` [PATCH 16/21] ARM: dma-mapping: bring back dmac_{clean,inv}_range Arnd Bergmann
2023-03-27 13:10   ` Russell King (Oracle)
2023-03-27 12:13 ` [PATCH 17/21] ARM: dma-mapping: use arch_sync_dma_for_{device,cpu}() internally Arnd Bergmann [this message]
2023-03-31  9:10   ` Linus Walleij
2023-03-31 12:48     ` Arnd Bergmann
2023-03-27 12:13 ` [PATCH 18/21] ARM: drop SMP support for ARM11MPCore Arnd Bergmann
2023-03-30  7:48   ` Neil Armstrong
2023-03-30 10:03     ` Arnd Bergmann
2023-03-30 16:40       ` Neil Armstrong
2023-03-30  8:12   ` Linus Walleij
2023-03-30 11:28   ` Joel Stanley
2023-03-31 12:54     ` Arnd Bergmann
2023-04-05  1:49       ` Joel Stanley
2023-03-30 11:51   ` Ard Biesheuvel
2023-03-31 17:09   ` Catalin Marinas
2023-03-27 12:13 ` [PATCH 19/21] ARM: dma-mapping: use generic form of arch_sync_dma_* helpers Arnd Bergmann
2023-03-27 12:13 ` [PATCH 20/21] ARM: dma-mapping: split out arch_dma_mark_clean() helper Arnd Bergmann
2023-03-27 12:48   ` Robin Murphy
2023-03-31 14:00     ` Arnd Bergmann
2023-03-31 15:12       ` Robin Murphy
2023-03-31 17:20         ` Arnd Bergmann
2023-03-27 15:01   ` Russell King (Oracle)
2023-03-31 14:06     ` Arnd Bergmann
2023-03-31 15:54       ` Russell King (Oracle)
2023-03-27 18:42   ` kernel test robot
2023-03-27 19:03   ` kernel test robot
2023-03-28 13:17   ` kernel test robot
2023-07-03  7:54   ` Geert Uytterhoeven
2023-07-06 14:11     ` Christoph Hellwig
2023-03-27 12:13 ` [PATCH 21/21] dma-mapping: replace custom code with generic implementation Arnd Bergmann
2023-03-27 22:25   ` Christoph Hellwig
2023-03-31 13:04     ` Arnd Bergmann
2023-03-30 14:06   ` Lad, Prabhakar
2023-04-13 12:13   ` Biju Das
2023-04-13 12:51     ` Arnd Bergmann
2023-06-27 16:52       ` Geert Uytterhoeven
2023-03-31 16:53 ` [PATCH 00/21] dma-mapping: unify support for cache flushes Catalin Marinas
2023-03-31 20:27   ` Arnd Bergmann
2023-05-25  7:46 ` Lad, Prabhakar
