From: Christoph Hellwig <hch@lst.de>
To: Yoshinori Sato <ysato@users.sourceforge.jp>,
	Rich Felker <dalias@libc.org>
Cc: linux-sh@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH 05/10] sh: move the ioremap implementation out of line
Date: Tue, 14 Jul 2020 12:18:51 +0000	[thread overview]
Message-ID: <20200714121856.955680-6-hch@lst.de> (raw)
In-Reply-To: <20200714121856.955680-1-hch@lst.de>

Move the internal implementation details of ioremap out of line; there is
no need to expose any of this to drivers for a slow-path API.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
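For context, nothing changes for drivers: ioremap() and friends keep their
signatures and now simply call __ioremap_caller() directly, with the segment
handling moved into arch/sh/mm/ioremap.c.  The wrappers stay inline so that
__builtin_return_address(0) still records the actual driver as the caller.
A minimal caller-side sketch follows; the base address, size and register
offset are hypothetical, not taken from this series:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical device with a status register at offset 0x10. */
static int example_probe(void)
{
	void __iomem *regs;
	u32 status;

	regs = ioremap(0xfe200000, 0x100);	/* public API, unchanged */
	if (!regs)
		return -ENOMEM;

	status = readl(regs + 0x10);		/* imaginary status register */
	pr_info("example: status=%08x\n", status);

	iounmap(regs);
	return 0;
}
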
 arch/sh/include/asm/io.h | 101 ++++++---------------------------------
 arch/sh/mm/ioremap.c     |  53 ++++++++++++++++++++
 2 files changed, 68 insertions(+), 86 deletions(-)

diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 357a7e0c86d682..da08a61a2f7dae 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -242,109 +242,38 @@ unsigned long long poke_real_address_q(unsigned long long addr,
 #define phys_to_virt(address)	(__va(address))
 #endif
 
-/*
- * On 32-bit SH, we traditionally have the whole physical address space
- * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
- * not need to do anything but place the address in the proper segment.
- * This is true for P1 and P2 addresses, as well as some P3 ones.
- * However, most of the P3 addresses and newer cores using extended
- * addressing need to map through page tables, so the ioremap()
- * implementation becomes a bit more complicated.
- *
- * See arch/sh/mm/ioremap.c for additional notes on this.
- *
- * We cheat a bit and always return uncachable areas until we've fixed
- * the drivers to handle caching properly.
- *
- * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
- * doesn't exist, so everything must go through page tables.
- */
 #ifdef CONFIG_MMU
+void iounmap(void __iomem *addr);
 void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
 			       pgprot_t prot, void *caller);
-void iounmap(void __iomem *addr);
-
-static inline void __iomem *
-__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
-}
-
-static inline void __iomem *
-__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-#ifdef CONFIG_29BIT
-	phys_addr_t last_addr = offset + size - 1;
-
-	/*
-	 * For P1 and P2 space this is trivial, as everything is already
-	 * mapped. Uncached access for P1 addresses are done through P2.
-	 * In the P3 case or for addresses outside of the 29-bit space,
-	 * mapping must be done by the PMB or by using page tables.
-	 */
-	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
-		u64 flags = pgprot_val(prot);
-
-		/*
-		 * Anything using the legacy PTEA space attributes needs
-		 * to be kicked down to page table mappings.
-		 */
-		if (unlikely(flags & _PAGE_PCC_MASK))
-			return NULL;
-		if (unlikely(flags & _PAGE_CACHABLE))
-			return (void __iomem *)P1SEGADDR(offset);
-
-		return (void __iomem *)P2SEGADDR(offset);
-	}
-
-	/* P4 above the store queues are always mapped. */
-	if (unlikely(offset >= P3_ADDR_MAX))
-		return (void __iomem *)P4SEGADDR(offset);
-#endif
-
-	return NULL;
-}
-
-static inline void __iomem *
-__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-	void __iomem *ret;
-
-	ret = __ioremap_trapped(offset, size);
-	if (ret)
-		return ret;
-
-	ret = __ioremap_29bit(offset, size, prot);
-	if (ret)
-		return ret;
-
-	return __ioremap(offset, size, prot);
-}
-#else
-#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
-#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
-static inline void iounmap(void __iomem *addr) {}
-#endif /* CONFIG_MMU */
 
 static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
 {
-	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
+	return __ioremap_caller(offset, size, PAGE_KERNEL_NOCACHE,
+			__builtin_return_address(0));
 }
 
 static inline void __iomem *
 ioremap_cache(phys_addr_t offset, unsigned long size)
 {
-	return __ioremap_mode(offset, size, PAGE_KERNEL);
+	return __ioremap_caller(offset, size, PAGE_KERNEL,
+			__builtin_return_address(0));
 }
 #define ioremap_cache ioremap_cache
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-static inline void __iomem *
-ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
+static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+		unsigned long flags)
 {
-	return __ioremap_mode(offset, size, __pgprot(flags));
+	return __ioremap_caller(offset, size, __pgprot(flags),
+			__builtin_return_address(0));
 }
-#endif
+#endif /* CONFIG_HAVE_IOREMAP_PROT */
+
+#else /* CONFIG_MMU */
+#define iounmap(addr)		do { } while (0)
+#define ioremap(offset, size)	((void __iomem *)(unsigned long)(offset))
+#endif /* CONFIG_MMU */
 
 #define ioremap_uc	ioremap
 
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index d9ec85b6bb2130..69e55939e48a6f 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -26,6 +26,51 @@
 #include <asm/mmu.h>
 #include "ioremap.h"
 
+/*
+ * On 32-bit SH, we traditionally have the whole physical address space mapped
+ * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
+ * anything but place the address in the proper segment.  This is true for P1
+ * and P2 addresses, as well as some P3 ones.  However, most of the P3 addresses
+ * and newer cores using extended addressing need to map through page tables, so
+ * the ioremap() implementation becomes a bit more complicated.
+ */
+#ifdef CONFIG_29BIT
+static void __iomem *
+__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
+{
+	phys_addr_t last_addr = offset + size - 1;
+
+	/*
+	 * For P1 and P2 space this is trivial, as everything is already
+	 * mapped. Uncached access for P1 addresses are done through P2.
+	 * In the P3 case or for addresses outside of the 29-bit space,
+	 * mapping must be done by the PMB or by using page tables.
+	 */
+	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
+		u64 flags = pgprot_val(prot);
+
+		/*
+		 * Anything using the legacy PTEA space attributes needs
+		 * to be kicked down to page table mappings.
+		 */
+		if (unlikely(flags & _PAGE_PCC_MASK))
+			return NULL;
+		if (unlikely(flags & _PAGE_CACHABLE))
+			return (void __iomem *)P1SEGADDR(offset);
+
+		return (void __iomem *)P2SEGADDR(offset);
+	}
+
+	/* P4 above the store queues are always mapped. */
+	if (unlikely(offset >= P3_ADDR_MAX))
+		return (void __iomem *)P4SEGADDR(offset);
+
+	return NULL;
+}
+#else
+#define __ioremap_29bit(offset, size, prot)		NULL
+#endif /* CONFIG_29BIT */
+
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -43,6 +88,14 @@ __ioremap_caller(phys_addr_t phys_addr, unsigned long size,
 	unsigned long offset, last_addr, addr, orig_addr;
 	void __iomem *mapped;
 
+	mapped = __ioremap_trapped(phys_addr, size);
+	if (mapped)
+		return mapped;
+
+	mapped = __ioremap_29bit(phys_addr, size, pgprot);
+	if (mapped)
+		return mapped;
+
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
 	if (!size || last_addr < phys_addr)
-- 
2.26.2
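
A side note on the 29-bit fast path in the hunks above: in 29-bit mode the
P1/P2 "mapping" is pure address arithmetic rather than a page-table
operation, which is why __ioremap_29bit() can short-circuit ioremap()
entirely.  The standalone sketch below mimics what P1SEGADDR()/P2SEGADDR()
boil down to, assuming the classic SH-4 segment constants; the physical
address is hypothetical:

#include <stdio.h>

#define P1SEG 0x80000000UL	/* cached, identity-mapped window */
#define P2SEG 0xa0000000UL	/* uncached, identity-mapped window */

/* "Map" a 29-bit physical address by placing it in a segment. */
static unsigned long p1segaddr(unsigned long phys)
{
	return (phys & 0x1fffffffUL) | P1SEG;	/* cached access */
}

static unsigned long p2segaddr(unsigned long phys)
{
	return (phys & 0x1fffffffUL) | P2SEG;	/* uncached access */
}

int main(void)
{
	unsigned long phys = 0x1f000000UL;	/* hypothetical device base */

	/* No page tables involved: the translation is just arithmetic. */
	printf("phys %#lx -> cached %#lx, uncached %#lx\n",
	       phys, p1segaddr(phys), p2segaddr(phys));
	return 0;
}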

Thread overview: 163+ messages
2020-03-24 16:26 ioremap and dma cleanups and fixes for superh Christoph Hellwig
2020-03-24 16:26 ` [PATCH 01/10] sh: remove -Werror from Makefiles Christoph Hellwig
2020-03-24 16:26 ` [PATCH 02/10] sh: sort the selects for SUPERH alphabetically Christoph Hellwig
2020-03-24 16:26 ` [PATCH 03/10] sh: remove __KERNEL__ ifdefs from non-UAPI headers Christoph Hellwig
2020-03-24 16:26 ` [PATCH 04/10] sh: move ioremap_fixed details out of <asm/io.h> Christoph Hellwig
2020-03-24 16:26 ` [PATCH 05/10] sh: move the ioremap implementation out of line Christoph Hellwig
2020-03-24 16:26 ` [PATCH 06/10] sh: don't include <asm/io_trapped.h> in <asm/io.h> Christoph Hellwig
2020-03-24 16:26 ` [PATCH 07/10] sh: unexport register_trapped_io and match_trapped_io_handler Christoph Hellwig
2020-03-24 16:26 ` [PATCH 08/10] dma-mapping: consolidate the NO_DMA definition in kernel/dma/Kconfig Christoph Hellwig
2020-03-24 16:26 ` [PATCH 09/10] sh: don't allow non-coherent DMA for NOMMU Christoph Hellwig
2020-03-24 16:26 ` [PATCH 10/10] sh: use the generic dma coherent remap allocator Christoph Hellwig
2020-06-26  8:07 ` ioremap and dma cleanups and fixes for superh (resend) Christoph Hellwig
2020-06-26  8:07   ` [PATCH 01/10] sh: remove -Werror from Makefiles Christoph Hellwig
2020-06-26  8:07   ` [PATCH 02/10] sh: sort the selects for SUPERH alphabetically Christoph Hellwig
2020-06-26  8:07   ` [PATCH 03/10] sh: remove __KERNEL__ ifdefs from non-UAPI headers Christoph Hellwig
2020-06-26  8:07   ` [PATCH 04/10] sh: move ioremap_fixed details out of <asm/io.h> Christoph Hellwig
2020-06-26  8:07   ` [PATCH 05/10] sh: move the ioremap implementation out of line Christoph Hellwig
2020-06-26  8:07   ` [PATCH 06/10] sh: don't include <asm/io_trapped.h> in <asm/io.h> Christoph Hellwig
2020-06-26  8:07   ` [PATCH 07/10] sh: unexport register_trapped_io and match_trapped_io_handler Christoph Hellwig
2020-06-26  8:07   ` [PATCH 08/10] dma-mapping: consolidate the NO_DMA definition in kernel/dma/Kconfig Christoph Hellwig
2020-06-26  8:07   ` [PATCH 09/10] sh: don't allow non-coherent DMA for NOMMU Christoph Hellwig
2020-06-28  0:53     ` Rob Landley
2020-06-28  1:01       ` Rob Landley
2020-06-28  7:24       ` Christoph Hellwig
2020-06-26  8:07   ` [PATCH 10/10] sh: use the generic dma coherent remap allocator Christoph Hellwig
2020-07-14 12:18 ` ioremap and dma cleanups and fixes for superh (2nd resend) Christoph Hellwig
2020-07-14 12:18   ` [PATCH 01/10] sh: remove -Werror from Makefiles Christoph Hellwig
2020-07-14 12:18   ` [PATCH 02/10] sh: sort the selects for SUPERH alphabetically Christoph Hellwig
2020-07-14 12:18   ` [PATCH 03/10] sh: remove __KERNEL__ ifdefs from non-UAPI headers Christoph Hellwig
2020-07-14 12:18   ` [PATCH 04/10] sh: move ioremap_fixed details out of <asm/io.h> Christoph Hellwig
2020-07-14 12:18   ` [PATCH 05/10] sh: move the ioremap implementation out of line Christoph Hellwig [this message]
2020-07-14 12:18   ` [PATCH 06/10] sh: don't include <asm/io_trapped.h> in <asm/io.h> Christoph Hellwig
2020-07-14 12:18   ` [PATCH 07/10] sh: unexport register_trapped_io and match_trapped_io_handler Christoph Hellwig
2020-07-14 12:18   ` [PATCH 08/10] dma-mapping: consolidate the NO_DMA definition in kernel/dma/Kconfig Christoph Hellwig
2020-07-21  3:17     ` Rich Felker
2020-07-21  5:11       ` Christoph Hellwig
2020-07-22  0:43         ` Rich Felker
2020-07-14 12:18   ` [PATCH 09/10] sh: don't allow non-coherent DMA for NOMMU Christoph Hellwig
2020-08-28  2:00     ` Rich Felker
2020-08-28  2:11       ` Rich Felker
2020-08-28  4:24         ` Christoph Hellwig
2020-08-28  9:26           ` Ulf Hansson
2020-08-28 15:09             ` Rich Felker
2020-08-29  8:31               ` Christoph Hellwig
2020-08-31 11:28                 ` Ulf Hansson
2020-07-14 12:18   ` [PATCH 10/10] sh: use the generic dma coherent remap allocator Christoph Hellwig
2020-07-14 12:31   ` ioremap and dma cleanups and fixes for superh (2nd resend) John Paul Adrian Glaubitz
2020-07-14 15:59     ` Rich Felker
2020-07-14 16:10       ` John Paul Adrian Glaubitz
2020-07-14 23:12         ` John Paul Adrian Glaubitz
2020-07-14 23:14           ` John Paul Adrian Glaubitz
2020-07-15  3:12           ` Rich Felker
2020-07-15  6:39             ` John Paul Adrian Glaubitz
2020-07-15  7:27           ` Geert Uytterhoeven
2020-07-15  7:37             ` John Paul Adrian Glaubitz
2020-07-15  8:06               ` Geert Uytterhoeven
2020-07-15  7:46             ` John Paul Adrian Glaubitz
2020-07-15  7:51               ` John Paul Adrian Glaubitz
2020-07-15  8:11                 ` Geert Uytterhoeven
2020-07-15  8:27                   ` John Paul Adrian Glaubitz
2020-07-15 14:37                     ` John Paul Adrian Glaubitz
2020-07-15 15:39                       ` John Paul Adrian Glaubitz
2020-07-15 16:18                       ` John Paul Adrian Glaubitz
2020-07-15 18:21                         ` Geert Uytterhoeven
2020-07-15 18:27                           ` John Paul Adrian Glaubitz
2020-07-16  9:40                           ` Peter Zijlstra
2020-07-16 10:05                             ` John Paul Adrian Glaubitz
2020-07-16 10:29                             ` peterz
2020-07-16 10:54                               ` John Paul Adrian Glaubitz
2020-07-16 11:01                                 ` peterz
2020-07-16 11:03                                   ` John Paul Adrian Glaubitz
2020-07-16 11:37                                     ` peterz
2020-07-16 12:04                                       ` peterz
2020-07-16 18:14                                         ` John Paul Adrian Glaubitz
2020-07-16 19:28                                           ` Peter Zijlstra
2020-07-16 19:33                                             ` John Paul Adrian Glaubitz
2020-07-16 11:30                         ` John Paul Adrian Glaubitz
2020-07-15  8:07               ` Geert Uytterhoeven
2020-07-16 11:31             ` John Paul Adrian Glaubitz
2020-07-20 13:38           ` Christoph Hellwig
2020-07-20 13:42             ` John Paul Adrian Glaubitz
2020-07-20 14:53               ` Rich Felker
2020-07-21  3:20                 ` Rich Felker
