From: Christoph Hellwig <hch@lst.de>
To: x86@kernel.org, linux-arm-kernel@lists.infradead.org,
	xen-devel@lists.xenproject.org, linux-c6x-dev@linux-c6x.org,
	linux-hexagon@vger.kernel.org, linux-ia64@vger.kernel.org,
	linux-mips@linux-mips.org, openrisc@lists.librecores.org,
	linuxppc-dev@lists.ozlabs.org, linux-s390@vger.kernel.org,
	linux-sh@vger.kernel.org, sparclinux@vger.kernel.org,
	linux-xtensa@linux-xtensa.org, dmaengine@vger.kernel.org,
	linux-tegra@vger.kernel.org, dri-devel@lists.freedesktop.org,
	linux-samsung-soc@vger.kernel.org,
	iommu@lists.linux-foundation.org, netdev@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Subject: [PATCH 07/44] xen-swiotlb: consolidate xen_swiotlb_dma_ops
Date: Fri, 16 Jun 2017 18:10:22 +0000	[thread overview]
Message-ID: <20170616181059.19206-8-hch@lst.de> (raw)
In-Reply-To: <20170616181059.19206-1-hch@lst.de>

ARM and x86 had duplicated versions of the dma_ops structure; the
only difference is that x86 hadn't wired up the set_dma_mask,
mmap, and get_sgtable ops yet.  On x86 all of them are identical
to the generic versions, so they aren't needed, but they are harmless.

All the symbols used only for xen_swiotlb_dma_ops can now be marked
static as well.
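
As an illustrative sketch (not part of this patch): with the ops table
defined once in drivers/xen/swiotlb-xen.c and exported through
<xen/swiotlb-xen.h>, the per-architecture setup only has to point at the
shared structure.  The simplified hooks below are modelled on the existing
ARM xen_mm_init() and x86 pci_xen_swiotlb_init() call sites; their bodies
here are assumptions about that surrounding code, not changes made by the
patch.

#include <xen/swiotlb-xen.h>	/* extern const struct dma_map_ops xen_swiotlb_dma_ops; */

/* ARM side (arch/arm/xen/mm.c), simplified: install the shared table. */
int __init xen_mm_init(void)
{
	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);		/* verbose, late init */
	xen_dma_ops = &xen_swiotlb_dma_ops;	/* single definition in swiotlb-xen.c */
	return 0;
}

/* x86 side (arch/x86/xen/pci-swiotlb-xen.c), simplified: same table via dma_ops. */
void __init pci_xen_swiotlb_init(void)
{
	if (xen_swiotlb) {
		xen_swiotlb_init(1, true);	/* verbose, early init */
		dma_ops = &xen_swiotlb_dma_ops;
	}
}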

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
 arch/arm/xen/mm.c              | 17 --------
 arch/x86/xen/pci-swiotlb-xen.c | 14 -------
 drivers/xen/swiotlb-xen.c      | 93 ++++++++++++++++++++++--------------------
 include/xen/swiotlb-xen.h      | 62 +---------------------------
 4 files changed, 49 insertions(+), 137 deletions(-)

diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index f0325d96b97a..785d2a562a23 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -185,23 +185,6 @@ EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 const struct dma_map_ops *xen_dma_ops;
 EXPORT_SYMBOL(xen_dma_ops);
 
-static const struct dma_map_ops xen_swiotlb_dma_ops = {
-	.alloc = xen_swiotlb_alloc_coherent,
-	.free = xen_swiotlb_free_coherent,
-	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
-	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
-	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
-	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
-	.map_sg = xen_swiotlb_map_sg_attrs,
-	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
-	.map_page = xen_swiotlb_map_page,
-	.unmap_page = xen_swiotlb_unmap_page,
-	.dma_supported = xen_swiotlb_dma_supported,
-	.set_dma_mask = xen_swiotlb_set_dma_mask,
-	.mmap = xen_swiotlb_dma_mmap,
-	.get_sgtable = xen_swiotlb_get_sgtable,
-};
-
 int __init xen_mm_init(void)
 {
 	struct gnttab_cache_flush cflush;
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 42b08f8fc2ca..37c6056a7bba 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -18,20 +18,6 @@
 
 int xen_swiotlb __read_mostly;
 
-static const struct dma_map_ops xen_swiotlb_dma_ops = {
-	.alloc = xen_swiotlb_alloc_coherent,
-	.free = xen_swiotlb_free_coherent,
-	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
-	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
-	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
-	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
-	.map_sg = xen_swiotlb_map_sg_attrs,
-	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
-	.map_page = xen_swiotlb_map_page,
-	.unmap_page = xen_swiotlb_unmap_page,
-	.dma_supported = xen_swiotlb_dma_supported,
-};
-
 /*
  * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
  *
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 8dab0d3dc172..a0f006daab48 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -295,7 +295,8 @@ int __ref xen_swiotlb_init(int verbose, bool early)
 		free_pages((unsigned long)xen_io_tlb_start, order);
 	return rc;
 }
-void *
+
+static void *
 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t flags,
 			   unsigned long attrs)
@@ -346,9 +347,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	memset(ret, 0, size);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
 
-void
+static void
 xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 			  dma_addr_t dev_addr, unsigned long attrs)
 {
@@ -369,8 +369,6 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 
 	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
-
 
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode.  The
@@ -379,7 +377,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
  * Once the device is given the dma address, the device owns this memory until
  * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
  */
-dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
+static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 				unsigned long offset, size_t size,
 				enum dma_data_direction dir,
 				unsigned long attrs)
@@ -429,7 +427,6 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 
 	return DMA_ERROR_CODE;
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
 
 /*
  * Unmap a single streaming mode DMA translation.  The dma_addr and size must
@@ -467,13 +464,12 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 	dma_mark_clean(phys_to_virt(paddr), size);
 }
 
-void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 			    size_t size, enum dma_data_direction dir,
 			    unsigned long attrs)
 {
 	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
 
 /*
  * Make physical memory consistent for a single streaming mode DMA translation
@@ -516,7 +512,6 @@ xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
 {
 	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);
 
 void
 xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -524,7 +519,25 @@ xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
 {
 	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);
+
+/*
+ * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
+ * concerning calls here are the same as for swiotlb_unmap_page() above.
+ */
+static void
+xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+			   int nelems, enum dma_data_direction dir,
+			   unsigned long attrs)
+{
+	struct scatterlist *sg;
+	int i;
+
+	BUG_ON(dir == DMA_NONE);
+
+	for_each_sg(sgl, sg, nelems, i)
+		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
+
+}
 
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
@@ -542,7 +555,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);
  * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
  * same here.
  */
-int
+static int
 xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 			 int nelems, enum dma_data_direction dir,
 			 unsigned long attrs)
@@ -599,27 +612,6 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 	}
 	return nelems;
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);
-
-/*
- * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
- */
-void
-xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-			   int nelems, enum dma_data_direction dir,
-			   unsigned long attrs)
-{
-	struct scatterlist *sg;
-	int i;
-
-	BUG_ON(dir == DMA_NONE);
-
-	for_each_sg(sgl, sg, nelems, i)
-		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
-
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
 
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations
@@ -641,21 +633,19 @@ xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
 					sg_dma_len(sg), dir, target);
 }
 
-void
+static void
 xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 			    int nelems, enum dma_data_direction dir)
 {
 	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);
 
-void
+static void
 xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 			       int nelems, enum dma_data_direction dir)
 {
 	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -663,14 +653,13 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
  * during bus mastering, then you would pass 0x00ffffff as the mask to
  * this function.
  */
-int
+static int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
 	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
 
-int
+static int
 xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
 {
 	if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
@@ -680,14 +669,13 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
 
 /*
  * Create userspace mapping for the DMA-coherent memory.
  * This function should be called with the pages from the current domain only,
  * passing pages mapped from other domains would lead to memory corruption.
  */
-int
+static int
 xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		     unsigned long attrs)
@@ -699,13 +687,12 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 #endif
 	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
 
 /*
  * This function should be called with the pages from the current domain only,
  * passing pages mapped from other domains would lead to memory corruption.
  */
-int
+static int
 xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
 			void *cpu_addr, dma_addr_t handle, size_t size,
 			unsigned long attrs)
@@ -727,4 +714,20 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
 #endif
 	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable);
+
+const struct dma_map_ops xen_swiotlb_dma_ops = {
+	.alloc = xen_swiotlb_alloc_coherent,
+	.free = xen_swiotlb_free_coherent,
+	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
+	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
+	.map_sg = xen_swiotlb_map_sg_attrs,
+	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
+	.map_page = xen_swiotlb_map_page,
+	.unmap_page = xen_swiotlb_unmap_page,
+	.dma_supported = xen_swiotlb_dma_supported,
+	.set_dma_mask = xen_swiotlb_set_dma_mask,
+	.mmap = xen_swiotlb_dma_mmap,
+	.get_sgtable = xen_swiotlb_get_sgtable,
+};
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index 1f6d78f044b6..ed2de363da33 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -1,69 +1,9 @@
 #ifndef __LINUX_SWIOTLB_XEN_H
 #define __LINUX_SWIOTLB_XEN_H
 
-#include <linux/dma-direction.h>
-#include <linux/scatterlist.h>
 #include <linux/swiotlb.h>
 
 extern int xen_swiotlb_init(int verbose, bool early);
+extern const struct dma_map_ops xen_swiotlb_dma_ops;
 
-extern void
-*xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-			    dma_addr_t *dma_handle, gfp_t flags,
-			    unsigned long attrs);
-
-extern void
-xen_swiotlb_free_coherent(struct device *hwdev, size_t size,
-			  void *vaddr, dma_addr_t dma_handle,
-			  unsigned long attrs);
-
-extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
-				       unsigned long offset, size_t size,
-				       enum dma_data_direction dir,
-				       unsigned long attrs);
-
-extern void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
-				   size_t size, enum dma_data_direction dir,
-				   unsigned long attrs);
-extern int
-xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-			 int nelems, enum dma_data_direction dir,
-			 unsigned long attrs);
-
-extern void
-xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-			   int nelems, enum dma_data_direction dir,
-			   unsigned long attrs);
-
-extern void
-xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-				size_t size, enum dma_data_direction dir);
-
-extern void
-xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-			    int nelems, enum dma_data_direction dir);
-
-extern void
-xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
-				   size_t size, enum dma_data_direction dir);
-
-extern void
-xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-			       int nelems, enum dma_data_direction dir);
-
-extern int
-xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
-
-extern int
-xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
-
-extern int
-xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		     unsigned long attrs);
-
-extern int
-xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
-			void *cpu_addr, dma_addr_t handle, size_t size,
-			unsigned long attrs);
 #endif /* __LINUX_SWIOTLB_XEN_H */
-- 
2.11.0


2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25   ` [PATCH 10/44] ia64: " Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25   ` [PATCH 11/44] m32r: " Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25   ` [PATCH 13/44] openrisc: " Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25   ` [PATCH 14/44] sh: " Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25   ` [PATCH 17/44] hexagon: switch to use ->mapping_error for error reporting Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-16  0:19     ` Richard Kuo
2017-06-16  0:19       ` Richard Kuo
2017-06-16  0:19       ` Richard Kuo
2017-06-16  0:19     ` Richard Kuo
2017-06-08 13:25   ` [PATCH 19/44] s390: implement ->mapping_error Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 13:25     ` Christoph Hellwig
2017-06-08 16:23     ` Gerald Schaefer
2017-06-08 16:23     ` Gerald Schaefer
2017-06-08 16:23       ` Gerald Schaefer
2017-06-08 16:23       ` Gerald Schaefer
2017-06-08 14:21   ` clean up and modularize arch dma_mapping interface David Miller
2017-06-08 14:21     ` David Miller
2017-06-08 14:21     ` David Miller
2017-06-08 14:21     ` David Miller
2017-06-08 13:25 ` [PATCH 08/44] xen-swiotlb: implement ->mapping_error Christoph Hellwig
2017-06-08 13:25 ` [PATCH 09/44] c6x: remove DMA_ERROR_CODE Christoph Hellwig
2017-06-08 13:25 ` [PATCH 10/44] ia64: " Christoph Hellwig
2017-06-08 13:25 ` [PATCH 11/44] m32r: " Christoph Hellwig
2017-06-08 13:25 ` [PATCH 12/44] microblaze: " Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25 ` Christoph Hellwig
2017-06-08 13:25 ` [PATCH 13/44] openrisc: " Christoph Hellwig
2017-06-08 13:25 ` [PATCH 14/44] sh: " Christoph Hellwig
2017-06-08 13:25 ` [PATCH 15/44] xtensa: " Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25 ` Christoph Hellwig
2017-06-08 13:25 ` [PATCH 16/44] arm64: " Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 14:02   ` Robin Murphy
2017-06-08 14:02   ` Robin Murphy
2017-06-08 14:02     ` Robin Murphy
2017-06-08 14:02     ` Robin Murphy
2017-06-08 14:02     ` Robin Murphy
2017-06-08 13:25 ` [PATCH 17/44] hexagon: switch to use ->mapping_error for error reporting Christoph Hellwig
2017-06-08 13:25 ` [PATCH 18/44] iommu/amd: implement ->mapping_error Christoph Hellwig
2017-06-08 13:25 ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25 ` [PATCH 19/44] s390: " Christoph Hellwig
2017-06-08 13:25 ` [PATCH 20/44] sparc: " Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 14:24   ` David Miller
2017-06-08 14:24   ` David Miller
2017-06-08 14:24     ` David Miller
2017-06-08 14:24     ` David Miller
2017-06-08 14:24     ` David Miller
2017-06-08 13:25 ` [PATCH 21/44] powerpc: " Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-14  9:17   ` Michael Ellerman
     [not found]   ` <20170608132609.32662-22-hch-jcswGhMUV9g@public.gmane.org>
2017-06-14  9:17     ` Michael Ellerman
2017-06-14  9:17       ` Michael Ellerman
2017-06-14  9:17       ` Michael Ellerman
2017-06-14  9:17       ` Michael Ellerman
2017-06-14  9:17       ` Michael Ellerman
2017-06-14  9:17       ` Michael Ellerman
2017-06-08 13:25 ` [PATCH 22/44] x86/pci-nommu: " Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25 ` [PATCH 23/44] x86/calgary: " Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25 ` Christoph Hellwig
2017-06-08 13:25 ` [PATCH 24/44] x86: remove DMA_ERROR_CODE Christoph Hellwig
2017-06-08 13:25 ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25 ` [PATCH 25/44] arm: implement ->mapping_error Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 14:43   ` Russell King - ARM Linux
2017-06-08 14:43   ` Russell King - ARM Linux
2017-06-08 14:43     ` Russell King - ARM Linux
2017-06-08 14:43     ` Russell King - ARM Linux
2017-06-16  8:43     ` Christoph Hellwig
2017-06-16  8:43     ` Christoph Hellwig
2017-06-16  8:43       ` Christoph Hellwig
2017-06-16  8:43       ` Christoph Hellwig
2017-06-08 13:25 ` [PATCH 26/44] dma-mapping: remove DMA_ERROR_CODE Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25 ` Christoph Hellwig
2017-06-08 13:25 ` [PATCH 27/44] sparc: remove leon_dma_ops Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 14:22   ` David Miller
2017-06-08 14:22   ` David Miller
2017-06-08 14:22     ` David Miller
2017-06-08 14:22     ` David Miller
2017-06-12  8:06   ` Andreas Larsson
2017-06-12  8:06   ` Andreas Larsson
2017-06-12  8:06     ` Andreas Larsson
2017-06-12  8:06     ` Andreas Larsson
2017-06-16  8:45     ` Christoph Hellwig
2017-06-16  8:45       ` Christoph Hellwig
2017-06-16  8:45       ` Christoph Hellwig
2017-06-16  8:45     ` Christoph Hellwig
2017-06-08 13:25 ` Christoph Hellwig
2017-06-08 13:25 ` [PATCH 28/44] sparc: remove arch specific dma_supported implementations Christoph Hellwig
2017-06-08 13:25 ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 14:22   ` Julian Calaby
2017-06-08 14:22     ` Julian Calaby
2017-06-08 14:22     ` Julian Calaby
2017-06-16  8:47     ` Christoph Hellwig
2017-06-16  8:47       ` Christoph Hellwig
2017-06-16  8:47       ` Christoph Hellwig
2017-06-16  8:47     ` Christoph Hellwig
2017-06-08 14:22   ` Julian Calaby
2017-06-08 14:24   ` David Miller
2017-06-08 14:24     ` David Miller
2017-06-08 14:24     ` David Miller
2017-06-08 14:24     ` David Miller
2017-06-08 14:24   ` David Miller
2017-06-08 13:25 ` [PATCH 29/44] dma-noop: remove dma_supported and mapping_error methods Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25 ` [PATCH 30/44] dma-virt: " Christoph Hellwig
2017-06-08 13:25 ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25 ` [PATCH 31/44] hexagon: remove arch-specific dma_supported implementation Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-16  0:20   ` Richard Kuo
2017-06-16  0:20   ` Richard Kuo
2017-06-16  0:20     ` Richard Kuo
2017-06-16  0:20     ` Richard Kuo
2017-06-08 13:25 ` Christoph Hellwig
2017-06-08 13:25 ` [PATCH 32/44] hexagon: remove the unused dma_is_consistent prototype Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25 ` [PATCH 33/44] openrisc: remove arch-specific dma_supported implementation Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-09 12:20   ` Geert Uytterhoeven
2017-06-09 12:20     ` Geert Uytterhoeven
2017-06-09 12:20     ` Geert Uytterhoeven
2017-06-09 12:20     ` Geert Uytterhoeven
2017-06-09 12:20     ` Geert Uytterhoeven
2017-06-16  8:39     ` Christoph Hellwig
     [not found]     ` <CAMuHMdUPeFJJtz8eJkQEAR-2w9oHt-fXeGHvvKFLfU2A4YyviQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-06-16  8:39       ` Christoph Hellwig
2017-06-16  8:39         ` Christoph Hellwig
2017-06-16  8:39         ` Christoph Hellwig
2017-06-16  8:39         ` Christoph Hellwig
2017-06-16  8:39         ` Christoph Hellwig
2017-06-16  8:39         ` Christoph Hellwig
2017-06-09 12:20   ` Geert Uytterhoeven
2017-06-08 13:25 ` [PATCH 34/44] arm: remove arch specific " Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:25   ` Christoph Hellwig
2017-06-08 13:26 ` [PATCH 35/44] x86: " Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26 ` [PATCH 36/44] dma-mapping: remove HAVE_ARCH_DMA_SUPPORTED Christoph Hellwig
2017-06-08 13:26 ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26 ` [PATCH 37/44] mips/loongson64: implement ->dma_supported instead of ->set_dma_mask Christoph Hellwig
2017-06-08 13:26 ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26 ` [PATCH 38/44] arm: " Christoph Hellwig
2017-06-08 13:26 ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26 ` [PATCH 39/44] xen-swiotlb: remove xen_swiotlb_set_dma_mask Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26 ` [PATCH 40/44] tile: remove dma_supported and mapping_error methods Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26 ` [PATCH 41/44] powerpc/cell: clean up fixed mapping dma_ops initialization Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26 ` Christoph Hellwig
2017-06-08 13:26 ` [PATCH 42/44] powerpc/cell: use the dma_supported method for ops switching Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26 ` [PATCH 43/44] dma-mapping: remove the set_dma_mask method Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26 ` [PATCH 44/44] powerpc: merge __dma_set_mask into dma_set_mask Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26   ` Christoph Hellwig
2017-06-08 13:26 ` Christoph Hellwig
2017-06-08 14:21 ` clean up and modularize arch dma_mapping interface David Miller
2017-06-16 18:10 ` clean up and modularize arch dma_mapping interface V2 Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 01/44] firmware/ivc: use dma_mapping_error Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 02/44] ibmveth: properly unwind on init errors Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 03/44] dmaengine: ioat: don't use DMA_ERROR_CODE Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 20:40     ` Alexander Duyck
2017-06-16 20:40     ` Alexander Duyck
2017-06-16 20:40       ` Alexander Duyck
2017-06-16 20:40       ` Alexander Duyck
2017-06-18  7:09       ` Christoph Hellwig
2017-06-18  7:09       ` Christoph Hellwig
2017-06-18  7:09         ` Christoph Hellwig
2017-06-18  7:09         ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 04/44] drm/exynos: " Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 05/44] drm/armada: don't abuse DMA_ERROR_CODE Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 06/44] iommu/dma: don't rely on DMA_ERROR_CODE Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-19 15:12     ` Robin Murphy
     [not found]     ` <20170616181059.19206-7-hch-jcswGhMUV9g@public.gmane.org>
2017-06-19 15:12       ` Robin Murphy
2017-06-19 15:12         ` Robin Murphy
2017-06-19 15:12         ` Robin Murphy
2017-06-19 15:12         ` Robin Murphy
2017-06-16 18:10   ` Christoph Hellwig [this message]
2017-06-16 18:10     ` [PATCH 07/44] xen-swiotlb: consolidate xen_swiotlb_dma_ops Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 08/44] xen-swiotlb: implement ->mapping_error Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 09/44] c6x: remove DMA_ERROR_CODE Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 10/44] ia64: " Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 11/44] m32r: " Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 12/44] microblaze: " Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 13/44] openrisc: " Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
     [not found]   ` <20170616181059.19206-1-hch-jcswGhMUV9g@public.gmane.org>
2017-06-16 18:10     ` [PATCH 14/44] sh: " Christoph Hellwig
2017-06-16 18:10       ` Christoph Hellwig
2017-06-16 18:10       ` Christoph Hellwig
2017-06-16 18:10       ` Christoph Hellwig
2017-06-16 18:10     ` [PATCH 42/44] powerpc/cell: use the dma_supported method for ops switching Christoph Hellwig
2017-06-16 18:10       ` Christoph Hellwig
2017-06-16 18:10       ` Christoph Hellwig
2017-06-16 18:10       ` Christoph Hellwig
2017-06-17 20:50       ` Benjamin Herrenschmidt
2017-06-17 20:50       ` Benjamin Herrenschmidt
2017-06-17 20:50         ` Benjamin Herrenschmidt
2017-06-17 20:50         ` Benjamin Herrenschmidt
2017-06-17 20:50         ` Benjamin Herrenschmidt
2017-06-17 20:50         ` Benjamin Herrenschmidt
2017-06-18  7:13         ` Christoph Hellwig
2017-06-18  7:13           ` Christoph Hellwig
2017-06-18  7:13           ` Christoph Hellwig
2017-06-18  9:54           ` Benjamin Herrenschmidt
2017-06-18  9:54             ` Benjamin Herrenschmidt
2017-06-18  9:54             ` Benjamin Herrenschmidt
2017-06-18  9:54             ` Benjamin Herrenschmidt
2017-06-18  9:54           ` Benjamin Herrenschmidt
2017-06-18  7:13         ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 14/44] sh: remove DMA_ERROR_CODE Christoph Hellwig
2017-06-16 18:10   ` [PATCH 15/44] xtensa: " Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 16/44] arm64: " Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 17/44] hexagon: switch to use ->mapping_error for error reporting Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 18/44] iommu/amd: implement ->mapping_error Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 19/44] s390: " Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 20/44] sparc: " Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 21/44] powerpc: " Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 22/44] x86/pci-nommu: " Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 23/44] x86/calgary: " Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 24/44] x86: remove DMA_ERROR_CODE Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 25/44] arm: implement ->mapping_error Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 26/44] dma-mapping: remove DMA_ERROR_CODE Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 27/44] sparc: remove leon_dma_ops Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 28/44] sparc: remove arch specific dma_supported implementations Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 29/44] dma-noop: remove dma_supported and mapping_error methods Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 30/44] dma-virt: " Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 31/44] hexagon: remove arch-specific dma_supported implementation Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 32/44] hexagon: remove the unused dma_is_consistent prototype Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 33/44] openrisc: remove arch-specific dma_supported implementation Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 34/44] arm: remove arch specific " Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 35/44] x86: " Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 36/44] dma-mapping: remove HAVE_ARCH_DMA_SUPPORTED Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 37/44] mips/loongson64: implement ->dma_supported instead of ->set_dma_mask Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 38/44] arm: " Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 39/44] xen-swiotlb: remove xen_swiotlb_set_dma_mask Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 40/44] tile: remove dma_supported and mapping_error methods Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 41/44] powerpc/cell: clean up fixed mapping dma_ops initialization Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 42/44] powerpc/cell: use the dma_supported method for ops switching Christoph Hellwig
2017-06-16 18:10   ` [PATCH 43/44] dma-mapping: remove the set_dma_mask method Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` [PATCH 44/44] powerpc: merge __dma_set_mask into dma_set_mask Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10     ` Christoph Hellwig
2017-06-16 18:10   ` Christoph Hellwig
2017-06-20 12:41   ` new dma-mapping tree, was Re: clean up and modularize arch dma_mapping interface V2 Christoph Hellwig
2017-06-20 12:41     ` Christoph Hellwig
2017-06-20 12:41     ` Christoph Hellwig
2017-06-20 12:41     ` Christoph Hellwig
2017-06-20 12:41     ` Christoph Hellwig
2017-06-20 12:41     ` Christoph Hellwig
2017-06-20 13:04     ` Stephen Rothwell
2017-06-20 13:04       ` Stephen Rothwell
2017-06-20 13:04       ` Stephen Rothwell
2017-06-20 13:16       ` Christoph Hellwig
2017-06-20 13:16         ` Christoph Hellwig
2017-06-20 13:16         ` Christoph Hellwig
2017-06-21 13:32         ` Marek Szyprowski
2017-06-21 13:32         ` Marek Szyprowski
2017-06-21 13:32           ` Marek Szyprowski
2017-06-21 13:32           ` Marek Szyprowski
2017-06-21 13:32           ` Marek Szyprowski
     [not found]           ` <5425587c-73e8-e24a-86a3-8a65a7791dcb-Sze3O3UU22JBDgjK7y7TUQ@public.gmane.org>
2017-06-22 21:53             ` Stephen Rothwell
2017-06-22 21:53               ` Stephen Rothwell
2017-06-22 21:53               ` Stephen Rothwell
2017-06-22 21:53               ` Stephen Rothwell
2017-06-22 21:53           ` Stephen Rothwell
2017-06-26  7:03           ` Christoph Hellwig
2017-06-26  7:03           ` Christoph Hellwig
2017-06-26  7:03             ` Christoph Hellwig
2017-06-26  7:03             ` Christoph Hellwig
2017-06-26  7:03             ` Christoph Hellwig
2017-06-20 13:16       ` Christoph Hellwig
2017-06-20 13:04     ` Stephen Rothwell
2017-06-20 13:14     ` Robin Murphy
2017-06-20 13:14     ` Robin Murphy
2017-06-20 13:14       ` Robin Murphy
2017-06-20 13:14       ` Robin Murphy
2017-06-20 13:14       ` Robin Murphy
2017-06-20 13:15       ` Christoph Hellwig
2017-06-20 13:15       ` Christoph Hellwig
2017-06-20 13:15         ` Christoph Hellwig
2017-06-20 13:15         ` Christoph Hellwig
2017-06-20 13:15         ` Christoph Hellwig
2017-06-20 12:41   ` Christoph Hellwig
2017-06-21 19:24   ` tndave
2017-06-21 19:24     ` tndave
2017-06-21 19:24     ` tndave
2017-06-24  7:18     ` Christoph Hellwig
     [not found]     ` <162d7932-5766-4c29-5471-07d1b699190a-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
2017-06-24  7:18       ` Christoph Hellwig
2017-06-24  7:18         ` Christoph Hellwig
2017-06-24  7:18         ` Christoph Hellwig
2017-06-24  7:18         ` Christoph Hellwig
2017-06-24 15:36         ` Benjamin Herrenschmidt
2017-06-24 15:36         ` Benjamin Herrenschmidt
2017-06-24 15:36           ` Benjamin Herrenschmidt
2017-06-24 15:36           ` Benjamin Herrenschmidt
2017-06-24 15:36           ` Benjamin Herrenschmidt
     [not found]           ` <1498318616.31581.87.camel-XVmvHMARGAS8U2dJNN8I7kB+6BGkLq7r@public.gmane.org>
2017-06-26  9:47             ` Christoph Hellwig
2017-06-26  9:47               ` Christoph Hellwig
2017-06-26  9:47               ` Christoph Hellwig
2017-06-26  9:47               ` Christoph Hellwig
     [not found]               ` <20170626094739.GB13981-jcswGhMUV9g@public.gmane.org>
2017-06-26 22:06                 ` tndave
2017-06-26 22:06                   ` tndave
2017-06-26 22:06                   ` tndave
2017-06-26 22:06                   ` tndave
2017-06-26 22:06               ` tndave
2017-06-26  9:47           ` Christoph Hellwig
2017-06-21 19:24   ` tndave
2017-06-20  9:19 ` clean up and modularize arch dma_mapping interface Daniel Vetter
2017-06-20  9:19   ` Daniel Vetter
2017-06-20  9:19   ` Daniel Vetter
2017-06-20  9:19   ` Daniel Vetter
     [not found]   ` <20170620091902.2dldmf43vhazq6yh-dv86pmgwkMBes7Z6vYuT8azUEOm+Xw19@public.gmane.org>
2017-06-20 13:17     ` Christoph Hellwig
2017-06-20 13:17       ` Christoph Hellwig
2017-06-20 13:17       ` Christoph Hellwig
2017-06-20 13:17       ` Christoph Hellwig
2017-06-20 13:17   ` Christoph Hellwig
2017-06-20  9:19 ` Daniel Vetter
