From: Christoph Hellwig <hch@lst.de>
To: torvalds@linux-foundation.org, axboe@kernel.dk
Cc: dan.j.williams@intel.com, vgupta@synopsys.com,
	hskinnemoen@gmail.com, egtvedt@samfundet.no, realmz6@gmail.com,
	dhowells@redhat.com, monstr@monstr.eu, x86@kernel.org,
	dwmw2@infradead.org, alex.williamson@redhat.com,
	grundler@parisc-linux.org, linux-kernel@vger.kernel.org,
	linux-arch@vger.kernel.org, linux-alpha@vger.kernel.org,
	linux-ia64@vger.kernel.org, linux-metag@vger.kernel.org,
	linux-mips@linux-mips.org, linux-parisc@vger.kernel.org,
	linuxppc-dev@lists.ozlabs.org, linux-s390@vger.kernel.org,
	sparclinux@vger.kernel.org, linux-xtensa@linux-xtensa.org,
	linux-nvdimm@ml01.01.org, linux-media@vger.kernel.org
Subject: [PATCH 07/31] alpha/pci_iommu: handle page-less SG entries
Date: Wed, 12 Aug 2015 09:05:26 +0200	[thread overview]
Message-ID: <1439363150-8661-8-git-send-email-hch@lst.de> (raw)
In-Reply-To: <1439363150-8661-1-git-send-email-hch@lst.de>

Use sg_phys() instead of virt_to_phys(sg_virt(sg)) so that we don't
require a kernel virtual address, and switch a few debug printfs to
print physical instead of virtual addresses.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/alpha/kernel/pci_iommu.c | 36 +++++++++++++++---------------------
 1 file changed, 15 insertions(+), 21 deletions(-)
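
The conversion works because sg_phys() takes the physical address straight
from the scatterlist entry's page and offset, so no kernel mapping of the
backing page is required.  A minimal sketch of the equivalence, roughly
following the generic scatterlist definitions (the helper name below is
illustrative only, not part of this patch):

	#include <linux/scatterlist.h>	/* sg_page(), struct scatterlist */

	/*
	 * Sketch only: sg_phys() is essentially
	 *	page_to_phys(sg_page(sg)) + sg->offset
	 * whereas the old virt_to_phys(sg_virt(sg)) expands to
	 *	virt_to_phys(page_address(sg_page(sg)) + sg->offset)
	 * and therefore only works when the page has a kernel virtual
	 * address.
	 */
	static inline dma_addr_t sg_phys_sketch(struct scatterlist *sg)
	{
		return page_to_phys(sg_page(sg)) + sg->offset;
	}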

diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index eddee77..5d46b49 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -248,20 +248,17 @@ static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
    until either pci_unmap_single or pci_dma_sync_single is performed.  */
 
 static dma_addr_t
-pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
+pci_map_single_1(struct pci_dev *pdev, unsigned long paddr, size_t size,
 		 int dac_allowed)
 {
 	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
 	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
 	struct pci_iommu_arena *arena;
 	long npages, dma_ofs, i;
-	unsigned long paddr;
 	dma_addr_t ret;
 	unsigned int align = 0;
 	struct device *dev = pdev ? &pdev->dev : NULL;
 
-	paddr = __pa(cpu_addr);
-
 #if !DEBUG_NODIRECT
 	/* First check to see if we can use the direct map window.  */
 	if (paddr + size + __direct_map_base - 1 <= max_dma
@@ -269,7 +266,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 		ret = paddr + __direct_map_base;
 
 		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
-		      cpu_addr, size, ret, __builtin_return_address(0));
+		      paddr, size, ret, __builtin_return_address(0));
 
 		return ret;
 	}
@@ -280,7 +277,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 		ret = paddr + alpha_mv.pci_dac_offset;
 
 		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
-		      cpu_addr, size, ret, __builtin_return_address(0));
+		      paddr, size, ret, __builtin_return_address(0));
 
 		return ret;
 	}
@@ -309,15 +306,15 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 		return 0;
 	}
 
+	offset = paddr & ~PAGE_MASK;
 	paddr &= PAGE_MASK;
 	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
 		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
 
-	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
-	ret += (unsigned long)cpu_addr & ~PAGE_MASK;
+	ret = arena->dma_base + dma_ofs * PAGE_SIZE + offset;
 
 	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
-	      cpu_addr, size, npages, ret, __builtin_return_address(0));
+	      paddr, size, npages, ret, __builtin_return_address(0));
 
 	return ret;
 }
@@ -357,7 +354,7 @@ static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
 	BUG_ON(dir == PCI_DMA_NONE);
 
 	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; 
-	return pci_map_single_1(pdev, (char *)page_address(page) + offset, 
+	return pci_map_single_1(pdev, page_to_phys(page) + offset,
 				size, dac_allowed);
 }
 
@@ -453,7 +450,7 @@ try_again:
 	}
 	memset(cpu_addr, 0, size);
 
-	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
+	*dma_addrp = pci_map_single_1(pdev, __pa(cpu_addr), size, 0);
 	if (*dma_addrp == 0) {
 		free_pages((unsigned long)cpu_addr, order);
 		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
@@ -497,9 +494,6 @@ static void alpha_pci_free_coherent(struct device *dev, size_t size,
    Write dma_length of each leader with the combined lengths of
    the mergable followers.  */
 
-#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
-#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
-
 static void
 sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
 	    int virt_ok)
@@ -512,13 +506,13 @@ sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
 	leader = sg;
 	leader_flag = 0;
 	leader_length = leader->length;
-	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;
+	next_paddr = sg_phys(leader) + leader_length;
 
 	/* we will not marge sg without device. */
 	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
 	for (++sg; sg < end; ++sg) {
 		unsigned long addr, len;
-		addr = SG_ENT_PHYS_ADDRESS(sg);
+		addr = sg_phys(sg);
 		len = sg->length;
 
 		if (leader_length + len > max_seg_size)
@@ -555,7 +549,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
 	struct scatterlist *out, struct pci_iommu_arena *arena,
 	dma_addr_t max_dma, int dac_allowed)
 {
-	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
+	unsigned long paddr = sg_phys(leader);
 	long size = leader->dma_length;
 	struct scatterlist *sg;
 	unsigned long *ptes;
@@ -621,7 +615,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
 #endif
 
 		size = sg->length;
-		paddr = SG_ENT_PHYS_ADDRESS(sg);
+		paddr = sg_phys(sg);
 
 		while (sg+1 < end && (int) sg[1].dma_address == -1) {
 			size += sg[1].length;
@@ -636,11 +630,11 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
 
 #if DEBUG_ALLOC > 0
 		DBGA("    (%ld) [%p,%x] np %ld\n",
-		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
+		     last_sg - leader, sg_phys(last_sg),
 		     last_sg->length, npages);
 		while (++last_sg <= sg) {
 			DBGA("        (%ld) [%p,%x] cont\n",
-			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
+			     last_sg - leader, sg_phys(last_sg),
 			     last_sg->length);
 		}
 #endif
@@ -668,7 +662,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 	if (nents == 1) {
 		sg->dma_length = sg->length;
 		sg->dma_address
-		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
+		  = pci_map_single_1(pdev, sg_phys(sg),
 				     sg->length, dac_allowed);
 		return sg->dma_address != 0;
 	}
-- 
1.9.1
