From: Christoph Hellwig <hch@lst.de>
To: Robin Murphy <robin.murphy@arm.com>
Cc: Joerg Roedel <joro@8bytes.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will.deacon@arm.com>,
	Tom Lendacky <thomas.lendacky@amd.com>,
	iommu@lists.linux-foundation.org,
	linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH 10/26] iommu/dma: Squash __iommu_dma_{map,unmap}_page helpers
Date: Mon, 22 Apr 2019 19:59:26 +0200	[thread overview]
Message-ID: <20190422175942.18788-11-hch@lst.de> (raw)
In-Reply-To: <20190422175942.18788-1-hch@lst.de>

From: Robin Murphy <robin.murphy@arm.com>

The remaining internal callsites don't care about having prototypes
compatible with the relevant dma_map_ops callbacks, so the extra
level of indirection just wastes space and complicates things.
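
Concretely, each remaining callsite now calls __iommu_dma_map() or
__iommu_dma_unmap() directly instead of going through the page-based
wrapper. For example, from the iommu_dma_alloc() hunk below:

	/* before: the wrapper only translated page + offset to a phys addr */
	*handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);

	/* after: do the page_to_phys() conversion at the callsite */
	*handle = __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot);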

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/iommu/dma-iommu.c | 25 +++++++------------------
 1 file changed, 7 insertions(+), 18 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4ebd08e3a83a..b52c5d6be7b4 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -698,18 +698,6 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
 }
 
-static dma_addr_t __iommu_dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, int prot)
-{
-	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
-}
-
-static void __iommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	__iommu_dma_unmap(dev, handle, size);
-}
-
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
@@ -955,7 +943,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 		if (!addr)
 			return NULL;
 
-		*handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+		*handle = __iommu_dma_map(dev, page_to_phys(page), iosize,
+					  ioprot);
 		if (*handle == DMA_MAPPING_ERROR) {
 			if (coherent)
 				__free_pages(page, get_order(size));
@@ -972,7 +961,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 		if (!page)
 			return NULL;
 
-		*handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+		*handle = __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot);
 		if (*handle == DMA_MAPPING_ERROR) {
 			dma_release_from_contiguous(dev, page,
 						    size >> PAGE_SHIFT);
@@ -986,7 +975,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 				arch_dma_prep_coherent(page, iosize);
 			memset(addr, 0, size);
 		} else {
-			__iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
+			__iommu_dma_unmap(dev, *handle, iosize);
 			dma_release_from_contiguous(dev, page,
 						    size >> PAGE_SHIFT);
 		}
@@ -1025,12 +1014,12 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	 * Hence how dodgy the below logic looks...
 	 */
 	if (dma_in_atomic_pool(cpu_addr, size)) {
-		__iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
+		__iommu_dma_unmap(dev, handle, iosize);
 		dma_free_from_pool(cpu_addr, size);
 	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
 		struct page *page = vmalloc_to_page(cpu_addr);
 
-		__iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
+		__iommu_dma_unmap(dev, handle, iosize);
 		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else if (is_vmalloc_addr(cpu_addr)){
@@ -1041,7 +1030,7 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		__iommu_dma_free(dev, area->pages, iosize, &handle);
 		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else {
-		__iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
+		__iommu_dma_unmap(dev, handle, iosize);
 		__free_pages(virt_to_page(cpu_addr), get_order(size));
 	}
 }
-- 
2.20.1


Thread overview: 155+ messages
2019-04-22 17:59 implement generic dma_map_ops for IOMMUs v3 Christoph Hellwig
2019-04-22 17:59 ` [PATCH 01/26] arm64/iommu: handle non-remapped addresses in ->mmap and ->get_sgtable Christoph Hellwig
2019-04-22 17:59 ` [PATCH 02/26] arm64/iommu: improve mmap bounds checking Christoph Hellwig
2019-04-29 12:35   ` Robin Murphy
2019-04-29 19:01     ` Christoph Hellwig
2019-04-30 11:38       ` Robin Murphy
2019-04-22 17:59 ` [PATCH 03/26] dma-mapping: add a Kconfig symbol to indicated arch_dma_prep_coherent presence Christoph Hellwig
2019-04-22 17:59 ` [PATCH 04/26] iommu/dma: Cleanup dma-iommu.h Christoph Hellwig
2019-04-22 17:59 ` [PATCH 05/26] iommu/dma: Remove the flush_page callback Christoph Hellwig
2019-04-22 17:59 ` [PATCH 06/26] iommu/dma: Use for_each_sg in iommu_dma_alloc Christoph Hellwig
2019-04-22 17:59 ` [PATCH 07/26] iommu/dma: move the arm64 wrappers to common code Christoph Hellwig
2019-06-05  0:47   ` Hillf Danton
2019-04-29 12:56   ` Robin Murphy
2019-06-03 19:47     ` Jon Hunter
     [not found]       ` <acb46c7f-0855-de30-485f-a6242968f947-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-04  6:05         ` Christoph Hellwig
     [not found]           ` <20190604060554.GA14536-jcswGhMUV9g@public.gmane.org>
2019-06-04 11:35             ` Jon Hunter
2019-04-22 17:59 ` [PATCH 08/26] iommu/dma: Move __iommu_dma_map Christoph Hellwig
2019-04-22 17:59 ` [PATCH 09/26] iommu/dma: Move domain lookup into __iommu_dma_{map,unmap} Christoph Hellwig
2019-04-22 17:59 ` [PATCH 10/26] iommu/dma: Squash __iommu_dma_{map,unmap}_page helpers Christoph Hellwig [this message]
2019-04-22 17:59 ` [PATCH 11/26] iommu/dma: Factor out remapped pages lookup Christoph Hellwig
2019-04-29 13:05   ` Robin Murphy
2019-04-29 19:10     ` Christoph Hellwig
2019-04-22 17:59 ` [PATCH 12/26] iommu/dma: Refactor the page array remapping allocator Christoph Hellwig
2019-04-29 13:10   ` Robin Murphy
2019-04-22 17:59 ` [PATCH 13/26] iommu/dma: Remove __iommu_dma_free Christoph Hellwig
2019-04-29 13:18   ` Robin Murphy
2019-04-22 17:59 ` [PATCH 14/26] iommu/dma: Refactor iommu_dma_free Christoph Hellwig
2019-04-29 13:59   ` Robin Murphy
2019-04-29 19:03     ` Christoph Hellwig
2019-04-29 19:16       ` Christoph Hellwig
2019-04-22 17:59 ` [PATCH 15/26] iommu/dma: Refactor iommu_dma_alloc Christoph Hellwig
2019-04-22 17:59 ` [PATCH 16/26] iommu/dma: Don't remap CMA unnecessarily Christoph Hellwig
2019-04-22 17:59 ` [PATCH 17/26] iommu/dma: Merge the CMA and alloc_pages allocation paths Christoph Hellwig
2019-04-22 17:59 ` [PATCH 18/26] iommu/dma: Split iommu_dma_free Christoph Hellwig
2019-04-22 17:59 ` [PATCH 19/26] iommu/dma: Cleanup variable naming in iommu_dma_alloc Christoph Hellwig
2019-04-29 14:11   ` Robin Murphy
2019-04-22 17:59 ` [PATCH 20/26] iommu/dma: Refactor iommu_dma_alloc, part 2 Christoph Hellwig
2019-04-29 14:45   ` Robin Murphy
2019-04-22 17:59 ` [PATCH 21/26] iommu/dma: Refactor iommu_dma_get_sgtable Christoph Hellwig
2019-04-29 14:08   ` Robin Murphy
2019-04-22 17:59 ` [PATCH 22/26] iommu/dma: Refactor iommu_dma_mmap Christoph Hellwig
2019-04-29 14:04   ` Robin Murphy
2019-04-22 17:59 ` [PATCH 23/26] iommu/dma: Don't depend on CONFIG_DMA_DIRECT_REMAP Christoph Hellwig
2019-04-29 14:46   ` Robin Murphy
2019-04-22 17:59 ` [PATCH 24/26] iommu/dma: Switch copyright boilerplace to SPDX Christoph Hellwig
2019-04-22 17:59 ` [PATCH 25/26] arm64: switch copyright boilerplace to SPDX in dma-mapping.c Christoph Hellwig
2019-04-22 17:59 ` [PATCH 26/26] arm64: trim includes " Christoph Hellwig
2019-04-29 15:00   ` Robin Murphy
2019-04-29 15:03 ` implement generic dma_map_ops for IOMMUs v3 Robin Murphy
