From: Christoph Hellwig <hch@lst.de>
To: Stefano Stabellini <sstabellini@kernel.org>,
	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: xen-devel@lists.xenproject.org, iommu@lists.linux-foundation.org,
	x86@kernel.org, linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org
Subject: [PATCH 09/11] swiotlb-xen: simplify cache maintenance
Date: Fri, 16 Aug 2019 15:00:11 +0200
Message-ID: <20190816130013.31154-10-hch@lst.de>
In-Reply-To: <20190816130013.31154-1-hch@lst.de>

Now that the dma-noncoherent.h helpers are always available on
architectures with support for non-coherent devices, we can call them
directly and drop the calls into the dma-direct routines.  This also
removes the odd pattern of calling dma_direct_map_page purely for its
cache maintenance side effects while ignoring its return value.
Instead we now have Xen wrappers for the arch_sync_dma_for_{device,cpu}
helpers that call the special Xen versions of those routines for
foreign pages.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
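Note: a condensed before/after sketch of the map path in
xen_swiotlb_map_page (error handling omitted; the full hunk in
drivers/xen/swiotlb-xen.c below is authoritative):

	/*
	 * Before: xen_dma_map_page() was called only for its cache
	 * maintenance side effects; the dma_addr_t it returned was
	 * simply thrown away.
	 */
	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);

	/*
	 * After: the coherency and attrs checks are explicit at the
	 * call site, and the helper only does cache maintenance.
	 */
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
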
 arch/arm/xen/mm.c           |  47 ++---------------
 drivers/xen/swiotlb-xen.c   |  19 ++++---
 include/xen/page-coherent.h | 100 +++++++++++-------------------------
 3 files changed, 42 insertions(+), 124 deletions(-)
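
The local/foreign dispatch at the heart of the new wrappers, condensed
here for reference (the full version is in the
include/xen/page-coherent.h hunk below):

	if (pfn_valid(PFN_DOWN(handle)))
		/* local page: Dom0 is mapped 1:1, so a valid pfn means
		 * the generic arch_sync_dma_* helpers apply */
		arch_sync_dma_for_device(dev, paddr, size, dir);
	else
		/* foreign page: use the Xen specific helper */
		__xen_dma_sync_for_device(dev, handle, size, dir);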

diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 85482cdda1e5..0eb88f1355c2 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -86,59 +86,18 @@ static void dma_cache_maint(dma_addr_t handle, size_t size,
 	} while (left);
 }
 
-static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
+void __xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
+		enum dma_data_direction dir)
 {
 	dma_cache_maint(handle, size, dir, DMA_UNMAP);
 }
 
-static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+void __xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
 	dma_cache_maint(handle, size, dir, DMA_MAP);
 }
 
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
-	     dma_addr_t dev_addr, unsigned long offset, size_t size,
-	     enum dma_data_direction dir, unsigned long attrs)
-{
-	if (dev_is_dma_coherent(hwdev))
-		return;
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return;
-
-	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
-}
-
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-
-{
-	if (dev_is_dma_coherent(hwdev))
-		return;
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return;
-
-	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (dev_is_dma_coherent(hwdev))
-		return;
-	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void __xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (dev_is_dma_coherent(hwdev))
-		return;
-	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
-}
-
 bool xen_arch_need_swiotlb(struct device *dev,
 			   phys_addr_t phys,
 			   dma_addr_t dev_addr)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 7b23929854e7..c3c383033ae4 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -388,6 +388,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
 
+	phys = map;
 	dev_addr = xen_phys_to_bus(map);
 
 	/*
@@ -399,14 +400,9 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 	}
 
-	page = pfn_to_page(map >> PAGE_SHIFT);
-	offset = map & ~PAGE_MASK;
 done:
-	/*
-	 * we are not interested in the dma_addr returned by xen_dma_map_page,
-	 * only in the potential cache flushes executed by the function.
-	 */
-	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
 	return dev_addr;
 }
 
@@ -426,7 +422,8 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	BUG_ON(dir == DMA_NONE);
 
-	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
+	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
 
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr))
@@ -446,7 +443,8 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
 {
 	phys_addr_t paddr = xen_bus_to_phys(dma_addr);
 
-	xen_dma_sync_single_for_cpu(dev, dma_addr, size, dir);
+	if (!dev_is_dma_coherent(dev))
+		xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
 
 	if (is_xen_swiotlb_buffer(dma_addr))
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
@@ -461,7 +459,8 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 	if (is_xen_swiotlb_buffer(dma_addr))
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
 
-	xen_dma_sync_single_for_device(dev, dma_addr, size, dir);
+	if (!dev_is_dma_coherent(dev))
+		xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
 }
 
 /*
diff --git a/include/xen/page-coherent.h b/include/xen/page-coherent.h
index 0f4d468e7a89..38b572ed0879 100644
--- a/include/xen/page-coherent.h
+++ b/include/xen/page-coherent.h
@@ -2,88 +2,48 @@
 #ifndef _XEN_PAGE_COHERENT_H
 #define _XEN_PAGE_COHERENT_H
 
-#include <linux/dma-mapping.h>
-#include <asm/page.h>
+#include <linux/dma-noncoherent.h>
 
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
     defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU)
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
-	     dma_addr_t dev_addr, unsigned long offset, size_t size,
-	     enum dma_data_direction dir, unsigned long attrs);
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs);
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir);
-void __xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-
-	if (pfn_valid(pfn))
-		dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
-	else
-		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn))
-		dma_direct_sync_single_for_device(hwdev, handle, size, dir);
-	else
-		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-	     dma_addr_t dev_addr, unsigned long offset, size_t size,
-	     enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long pfn = PFN_DOWN(dev_addr);
-
-	if (pfn_valid(pfn))
-		dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
+/*
+ * Dom0 is mapped 1:1, and while a Linux page can span multiple Xen
+ * pages it is not possible to mix local and foreign Xen pages.  Because
+ * Dom0 is mapped 1:1, pfn_valid on a foreign mfn always returns false.
+ * If the page is local we can safely use the native routines for cache
+ * maintenance, otherwise we call the Xen specific function.
+ */
+void __xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
+		enum dma_data_direction dir);
+void __xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir);
+
+static inline void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
+		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+{
+	if (pfn_valid(PFN_DOWN(handle)))
+		arch_sync_dma_for_cpu(dev, paddr, size, dir);
 	else
-		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
+		__xen_dma_sync_for_cpu(dev, handle, size, dir);
 }
 
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
+static inline void xen_dma_sync_for_device(struct device *dev,
+		dma_addr_t handle, phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	unsigned long pfn = PFN_DOWN(handle);
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
-	 * multiple Xen page, it's not possible to have a mix of local and
-	 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
-	 * foreign mfn will always return false. If the page is local we can
-	 * safely call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
-	if (pfn_valid(pfn))
-		dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
+	if (pfn_valid(PFN_DOWN(handle)))
+		arch_sync_dma_for_device(dev, paddr, size, dir);
 	else
-		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
+		__xen_dma_sync_for_device(dev, handle, size, dir);
 }
 #else
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-	     dma_addr_t dev_addr, unsigned long offset, size_t size,
-	     enum dma_data_direction dir, unsigned long attrs)
-{
-}
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-}
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+static inline void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
+		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
 {
 }
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+static inline void xen_dma_sync_for_device(struct device *dev,
+		dma_addr_t handle, phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 }
 #endif
-- 
2.20.1

