From: Christoph Hellwig <hch@lst.de>
To: Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Paul Mackerras <paulus@samba.org>,
	Michael Ellerman <mpe@ellerman.id.au>
Cc: linuxppc-dev@lists.ozlabs.org, iommu@lists.linux-foundation.org,
	linux-mm@kvack.org, linux-arch@vger.kernel.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH 07/33] powerpc/dma: untangle vio_dma_mapping_ops from dma_iommu_ops
Date: Tue,  9 Oct 2018 15:24:34 +0200
Message-ID: <20181009132500.17643-8-hch@lst.de>
In-Reply-To: <20181009132500.17643-1-hch@lst.de>

vio_dma_mapping_ops currently makes a lot of indirect calls through
dma_iommu_ops, which not only make the code harder to follow but are
also expensive in the post-Spectre world.  Unwind the indirect calls
by calling the ppc_iommu_* or iommu_* APIs directly where applicable,
or by using the dma_iommu_* methods directly where we can.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
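For reference, a minimal sketch of the devirtualization this performs,
using hypothetical foo_* names rather than the real kernel APIs: with
retpolines enabled, an indirect call through an ops table is routed
through a speculation-safe thunk on every invocation, while a direct
call is an ordinary branch that the compiler can also inline.

	struct device;

	/* Hypothetical ops table standing in for dma_iommu_ops. */
	struct foo_dma_ops {
		int (*map)(struct device *dev, void *ptr, unsigned long size);
	};

	static int foo_iommu_map(struct device *dev, void *ptr,
				 unsigned long size)
	{
		/* ... program the IOMMU ... */
		return 0;
	}

	static const struct foo_dma_ops foo_iommu_ops = {
		.map	= foo_iommu_map,
	};

	/* Before: indirect call; under CONFIG_RETPOLINE this goes
	 * through a retpoline thunk every time. */
	static int foo_map_via_ops(struct device *dev, void *ptr,
				   unsigned long size)
	{
		return foo_iommu_ops.map(dev, ptr, size);
	}

	/* After: direct call, a plain branch the compiler may inline. */
	static int foo_map_direct(struct device *dev, void *ptr,
				  unsigned long size)
	{
		return foo_iommu_map(dev, ptr, size);
	}
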
 arch/powerpc/include/asm/iommu.h     |  2 ++
 arch/powerpc/kernel/dma-iommu.c      |  2 +-
 arch/powerpc/platforms/pseries/vio.c | 87 ++++++++++++----------------
 3 files changed, 39 insertions(+), 52 deletions(-)

diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index ab3a4fba38e3..26b7cc176a99 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -244,6 +244,8 @@ static inline int __init tce_iommu_bus_notifier_init(void)
 }
 #endif /* !CONFIG_IOMMU_API */
 
+int dma_iommu_dma_supported(struct device *dev, u64 mask);
+u64 dma_iommu_get_required_mask(struct device *dev);
 int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 #else
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 2ca6cfaebf65..0613278abf9f 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -92,7 +92,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
 		return 1;
 }
 
-static u64 dma_iommu_get_required_mask(struct device *dev)
+u64 dma_iommu_get_required_mask(struct device *dev)
 {
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 	u64 mask;
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 49e04ec19238..1dfff53ebd7f 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -492,7 +492,9 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
 		return NULL;
 	}
 
-	ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
+	ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
+				    dma_handle, dev->coherent_dma_mask, flag,
+				    dev_to_node(dev));
 	if (unlikely(ret == NULL)) {
 		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 		atomic_inc(&viodev->cmo.allocs_failed);
@@ -507,8 +509,7 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
 
-	dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);
-
+	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
 	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 }
 
@@ -518,22 +519,22 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
                                          unsigned long attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
-	struct iommu_table *tbl;
+	struct iommu_table *tbl = get_iommu_table_base(dev);
 	dma_addr_t ret = IOMMU_MAPPING_ERROR;
 
-	tbl = get_iommu_table_base(dev);
-	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
-		atomic_inc(&viodev->cmo.allocs_failed);
-		return ret;
-	}
-
-	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
-	if (unlikely(dma_mapping_error(dev, ret))) {
-		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
-		atomic_inc(&viodev->cmo.allocs_failed);
-	}
-
+	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
+		goto out_fail;
+	ret = iommu_map_page(dev, tbl, page, offset, size, device_to_mask(dev),
+			direction, attrs);
+	if (unlikely(ret == IOMMU_MAPPING_ERROR))
+		goto out_deallocate;
 	return ret;
+
+out_deallocate:
+	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
+out_fail:
+	atomic_inc(&viodev->cmo.allocs_failed);
+	return IOMMU_MAPPING_ERROR;
 }
 
 static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
@@ -542,11 +543,9 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
 				     unsigned long attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
-	struct iommu_table *tbl;
-
-	tbl = get_iommu_table_base(dev);
-	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
+	struct iommu_table *tbl = get_iommu_table_base(dev);
 
+	iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
 	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
 }
 
@@ -555,34 +554,32 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                                 unsigned long attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
-	struct iommu_table *tbl;
+	struct iommu_table *tbl = get_iommu_table_base(dev);
 	struct scatterlist *sgl;
 	int ret, count;
 	size_t alloc_size = 0;
 
-	tbl = get_iommu_table_base(dev);
 	for_each_sg(sglist, sgl, nelems, count)
 		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
 
-	if (vio_cmo_alloc(viodev, alloc_size)) {
-		atomic_inc(&viodev->cmo.allocs_failed);
-		return 0;
-	}
-
-	ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);
-
-	if (unlikely(!ret)) {
-		vio_cmo_dealloc(viodev, alloc_size);
-		atomic_inc(&viodev->cmo.allocs_failed);
-		return ret;
-	}
+	if (vio_cmo_alloc(viodev, alloc_size))
+		goto out_fail;
+	ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, device_to_mask(dev),
+			direction, attrs);
+	if (unlikely(!ret))
+		goto out_deallocate;
 
 	for_each_sg(sglist, sgl, ret, count)
 		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
 	if (alloc_size)
 		vio_cmo_dealloc(viodev, alloc_size);
-
 	return ret;
+
+out_deallocate:
+	vio_cmo_dealloc(viodev, alloc_size);
+out_fail:
+	atomic_inc(&viodev->cmo.allocs_failed);
+	return 0;
 }
 
 static void vio_dma_iommu_unmap_sg(struct device *dev,
@@ -591,30 +588,18 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
 		unsigned long attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
-	struct iommu_table *tbl;
+	struct iommu_table *tbl = get_iommu_table_base(dev);
 	struct scatterlist *sgl;
 	size_t alloc_size = 0;
 	int count;
 
-	tbl = get_iommu_table_base(dev);
 	for_each_sg(sglist, sgl, nelems, count)
 		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
 
-	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
-
+	ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);
 	vio_cmo_dealloc(viodev, alloc_size);
 }
 
-static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
-{
-        return dma_iommu_ops.dma_supported(dev, mask);
-}
-
-static u64 vio_dma_get_required_mask(struct device *dev)
-{
-        return dma_iommu_ops.get_required_mask(dev);
-}
-
 static const struct dma_map_ops vio_dma_mapping_ops = {
 	.alloc             = vio_dma_iommu_alloc_coherent,
 	.free              = vio_dma_iommu_free_coherent,
@@ -623,8 +608,8 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
 	.unmap_sg          = vio_dma_iommu_unmap_sg,
 	.map_page          = vio_dma_iommu_map_page,
 	.unmap_page        = vio_dma_iommu_unmap_page,
-	.dma_supported     = vio_dma_iommu_dma_supported,
-	.get_required_mask = vio_dma_get_required_mask,
+	.dma_supported     = dma_iommu_dma_supported,
+	.get_required_mask = dma_iommu_get_required_mask,
 	.mapping_error	   = dma_iommu_mapping_error,
 };
 
-- 
2.19.0
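
A note on the entitlement accounting in the map/unmap hunks above: CMO
entitlement is reserved and released in IOMMU-page-sized units, which is
why every vio_cmo_alloc()/vio_cmo_dealloc() call rounds the byte length
up first, and why the error and unmap paths must round the same way or
entitlement would leak.  A minimal standalone sketch of the arithmetic,
assuming a 4 KiB IOMMU page size (IOMMU_PAGE_SIZE(tbl) is really
per-table) and a simplified stand-in for the kernel's roundup() macro:

	#include <stdio.h>

	/* Simplified equivalent of the kernel's roundup(). */
	#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

	int main(void)
	{
		unsigned long iommu_page_size = 4096; /* assumed page size */

		/* A 6000-byte mapping charges two full IOMMU pages of
		 * entitlement; the identical rounded value is what the
		 * unmap and error paths hand back on release. */
		printf("%lu\n", roundup(6000UL, iommu_page_size)); /* 8192 */
		return 0;
	}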

