From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: linux-arch@vger.kernel.org,
	Linus Torvalds <torvalds@linuxfoundation.org>,
	Paul McKenney <paulmck@kernel.org>,
	x86@kernel.org, Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Peter Zijlstra <peterz@infradead.org>,
	Juri Lelli <juri.lelli@redhat.com>,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Ben Segall <bsegall@google.com>, Mel Gorman <mgorman@suse.de>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	Will Deacon <will@kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Linux-MM <linux-mm@kvack.org>,
	Russell King <linux@armlinux.org.uk>,
	Linux ARM <linux-arm-kernel@lists.infradead.org>,
	Chris Zankel <chris@zankel.net>,
	Max Filippov <jcmvbkbc@gmail.com>,
	linux-xtensa@linux-xtensa.org,
	Jani Nikula <jani.nikula@linux.intel.com>,
	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	David Airlie <airlied@linux.ie>, Daniel Vetter <daniel@ffwll.ch>,
	intel-gfx <intel-gfx@lists.freedesktop.org>,
	dri-devel <dri-devel@lists.freedesktop.org>,
	Ard Biesheuvel <ardb@kernel.org>,
	Herbert Xu <herbert@gondor.apana.org.au>,
	Vineet Gupta <vgupta@synopsys.com>,
	linux-snps-arc@lists.infradead.org, Arnd Bergmann <arnd@arndb.de>,
	Guo Ren <guoren@kernel.org>,
	linux-csky@vger.kernel.org, Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	linux-mips@vger.kernel.org, Nick Hu <nickhu@andestech.com>,
	Greentime Hu <green.hu@gmail.com>,
	Vincent Chen <deanbo422@gmail.com>,
	Michael Ellerman <mpe@ellerman.id.au>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Paul Mackerras <paulus@samba.org>,
	linuxppc-dev@lists.ozlabs.org,
	"David S. Miller" <davem@davemloft.net>,
	sparclinux@vger.kernel.org
Subject: [patch RFC 15/15] mm/highmem: Provide kmap_temporary*
Date: Sat, 19 Sep 2020 11:18:06 +0200	[thread overview]
Message-ID: <20200919092617.375720378@linutronix.de> (raw)
In-Reply-To: 20200919091751.011116649@linutronix.de

Now that the kmap atomic index is stored in the task struct, provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the maps of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.
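
A loose sketch of what the switch-time handling amounts to is below. This
is an illustration only, not the series' implementation: kmap_idx,
kmap_pteval[] and kmap_fix_vaddr() are hypothetical names; the real
per-task state layout comes from the earlier patches in this series.

/* Sketch: clear the outgoing task's slots, rewrite the incoming ones */
static void kmap_switch_sketch(struct task_struct *prev,
			       struct task_struct *next)
{
	pte_t *kmap_pte = kmap_get_pte();
	int i;

	/* Tear down prev's temporary mappings (kmap_idx is assumed) */
	for (i = 0; i < prev->kmap_idx; i++)
		pte_clear(&init_mm, kmap_fix_vaddr(i), kmap_pte - i);

	/* Re-establish next's saved ptes (kmap_pteval[] is assumed) */
	for (i = 0; i < next->kmap_idx; i++)
		set_pte_at(&init_mm, kmap_fix_vaddr(i), kmap_pte - i,
			   next->kmap_pteval[i]);
}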

The kmap_temporary and iomap_temporary interfaces can be invoked from both
preemptible and atomic contexts.
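
For example, a hypothetical caller (illustration only; page, data and len
are assumed to come from the surrounding code) could use either form:

/* Hypothetical caller, for illustration only */
static void copy_to_page_example(struct page *page, const void *data,
				 size_t len)
{
	void *vaddr;

	/*
	 * Preemptible variant: no preempt_disable(); the task may be
	 * switched out here and its mapping is restored on switch-in.
	 */
	vaddr = kmap_temporary_page(page);
	memcpy(vaddr, data, len);
	kunmap_temporary(vaddr);

	/*
	 * Atomic variant: unchanged semantics, wraps the same core with
	 * preempt_disable()/preempt_enable().
	 */
	vaddr = kmap_atomic(page);
	memcpy(vaddr, data, len);
	kunmap_atomic(vaddr);
}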

A wholesale conversion of kmap_atomic to be fully preemptible is not
possible because some of the usage sites might rely on the preemption
disable for serialization or for per-CPU semantics. That conversion needs
to be done on a case-by-case basis.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/include/asm/iomap.h |   16 ++++++++-
 arch/x86/mm/iomap_32.c       |    7 +---
 include/linux/highmem.h      |   70 +++++++++++++++++++++++++++++++++----------
 mm/highmem.c                 |   18 +++++------
 4 files changed, 80 insertions(+), 31 deletions(-)

--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -13,11 +13,23 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
+void __iomem *iomap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot);
+
+static inline void __iomem *iomap_atomic_pfn_prot(unsigned long pfn,
+						  pgprot_t prot)
+{
+	preempt_disable();
+	return iomap_temporary_pfn_prot(pfn, prot);
+}
+
+static inline void iounmap_temporary(void __iomem *vaddr)
+{
+	kunmap_temporary_indexed((void __force *)vaddr);
+}
 
 static inline void iounmap_atomic(void __iomem *vaddr)
 {
-	kunmap_atomic_indexed((void __force *)vaddr);
+	iounmap_temporary(vaddr);
 	preempt_enable();
 }
 
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -44,7 +44,7 @@ void iomap_free(resource_size_t base, un
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void __iomem *iomap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in
@@ -60,7 +60,6 @@ void __iomem *iomap_atomic_pfn_prot(unsi
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	preempt_disable();
-	return (void __force __iomem *)kmap_atomic_pfn_prot(pfn, prot);
+	return (void __force __iomem *)__kmap_temporary_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
+EXPORT_SYMBOL_GPL(iomap_temporary_pfn_prot);
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -35,9 +35,9 @@ static inline void invalidate_kernel_vma
  * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
  */
 #ifdef CONFIG_KMAP_ATOMIC_GENERIC
-void *kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
-void *kmap_atomic_page_prot(struct page *page, pgprot_t prot);
-void kunmap_atomic_indexed(void *vaddr);
+void *__kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot);
+void *__kmap_temporary_page_prot(struct page *page, pgprot_t prot);
+void kunmap_temporary_indexed(void *vaddr);
 void kmap_switch_temporary(struct task_struct *prev, struct task_struct *next);
 # ifndef ARCH_NEEDS_KMAP_HIGH_GET
 static inline void *arch_kmap_temporary_high_get(struct page *page)
@@ -95,16 +95,35 @@ static inline void kunmap(struct page *p
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_temporary_page_prot(struct page *page, pgprot_t prot)
 {
-	preempt_disable();
-	return kmap_atomic_page_prot(page, prot);
+	return __kmap_temporary_page_prot(page, prot);
 }
 
-static inline void *kmap_atomic_pfn(unsigned long pfn)
+static inline void *kmap_temporary_page(struct page *page)
+{
+	return kmap_temporary_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+	return __kmap_temporary_pfn_prot(pfn, prot);
+}
+
+static inline void *kmap_temporary_pfn(unsigned long pfn)
+{
+	return kmap_temporary_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_temporary(void *vaddr)
+{
+	kunmap_temporary_indexed(vaddr);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
-	return kmap_atomic_pfn_prot(pfn, kmap_prot);
+	return kmap_temporary_page_prot(page, prot);
 }
 
 static inline void *kmap_atomic(struct page *page)
@@ -112,9 +131,10 @@ static inline void *kmap_atomic(struct p
 	return kmap_atomic_prot(page, kmap_prot);
 }
 
-static inline void __kunmap_atomic(void *addr)
+static inline void *kmap_atomic_pfn(unsigned long pfn)
 {
-	kumap_atomic_indexed(addr);
+	preempt_disable();
+	return kmap_temporary_pfn_prot(pfn, kmap_prot);
 }
 
 /* declarations for linux/mm/highmem.c */
@@ -177,6 +197,22 @@ static inline void kunmap(struct page *p
 #endif
 }
 
+static inline void *kmap_temporary_page(struct page *page)
+{
+	pagefault_disable();
+	return page_address(page);
+}
+
+static inline void *kmap_temporary_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_temporary_page(page);
+}
+
+static inline void *kmap_temporary_pfn(unsigned long pfn)
+{
+	return kmap_temporary_page(pfn_to_page(pfn));
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -194,12 +230,8 @@ static inline void *kmap_atomic_pfn(unsi
 	return kmap_atomic(pfn_to_page(pfn));
 }
 
-static inline void __kunmap_atomic(void *addr)
+static inline void __kunmap_temporary(void *addr)
 {
-	/*
-	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
-	 * handles preemption
-	 */
 #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
 	kunmap_flush_on_unmap(addr);
 #endif
@@ -217,10 +249,16 @@ static inline void __kunmap_atomic(void
 #define kunmap_atomic(addr)						\
 	do {								\
 		BUILD_BUG_ON(__same_type((addr), struct page *));	\
-		__kunmap_atomic(addr);					\
+		__kunmap_temporary(addr);				\
 		preempt_enable();					\
 	} while (0)
 
+#define kunmap_temporary(addr)						\
+	do {								\
+		BUILD_BUG_ON(__same_type((addr), struct page *));	\
+		__kunmap_temporary(addr);				\
+	} while (0)
+
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -432,7 +432,7 @@ static pte_t *kmap_get_pte(void)
 	return __kmap_pte;
 }
 
-static void *__kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+static void *do_kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	pte_t pteval, *kmap_pte = kmap_get_pte();
 	unsigned long vaddr;
@@ -451,14 +451,14 @@ static void *__kmap_atomic_pfn_prot(unsi
 	return (void *)vaddr;
 }
 
-void *kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void *__kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	pagefault_disable();
-	return __kmap_atomic_pfn_prot(pfn, prot);
+	return do_kmap_temporary_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL(kmap_atomic_pfn_prot);
+EXPORT_SYMBOL(__kmap_temporary_pfn_prot);
 
-void *kmap_atomic_page_prot(struct page *page, pgprot_t prot)
+void *__kmap_temporary_page_prot(struct page *page, pgprot_t prot)
 {
 	void *kmap;
 
@@ -471,11 +471,11 @@ void *kmap_atomic_page_prot(struct page
 	if (kmap)
 		return kmap;
 
-	return __kmap_atomic_pfn_prot(page_to_pfn(page), prot);
+	return do_kmap_temporary_pfn_prot(page_to_pfn(page), prot);
 }
-EXPORT_SYMBOL(kmap_atomic_page_prot);
+EXPORT_SYMBOL(__kmap_temporary_page_prot);
 
-void kunmap_atomic_indexed(void *vaddr)
+void kunmap_temporary_indexed(void *vaddr)
 {
 	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
 	pte_t *kmap_pte = kmap_get_pte();
@@ -503,7 +503,7 @@ void kunmap_atomic_indexed(void *vaddr)
 	preempt_enable();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_indexed);
+EXPORT_SYMBOL(kunmap_temporary_indexed);
 
 void kmap_switch_temporary(struct task_struct *prev, struct task_struct *next)
 {


WARNING: multiple messages have this Message-ID (diff)
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Juri Lelli <juri.lelli@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	dri-devel <dri-devel@lists.freedesktop.org>,
	Ben Segall <bsegall@google.com>,
	Max Filippov <jcmvbkbc@gmail.com>, Guo Ren <guoren@kernel.org>,
	sparclinux@vger.kernel.org, Vincent Chen <deanbo422@gmail.com>,
	Will Deacon <will@kernel.org>, Ard Biesheuvel <ardb@kernel.org>,
	linux-arch@vger.kernel.org,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Herbert Xu <herbert@gondor.apana.org.au>,
	Michael Ellerman <mpe@ellerman.id.au>,
	x86@kernel.org, Russell King <linux@armlinux.org.uk>,
	linux-csky@vger.kernel.org, David Airlie <airlied@linux.ie>,
	Mel Gorman <mgorman@suse.de>,
	linux-snps-arc@lists.infradead.org,
	linux-xtensa@linux-xtensa.org, Paul McKenney <paulmck@kernel.org>,
	intel-gfx <intel-gfx@lists.freedesktop.org>,
	linuxppc-dev@lists.ozlabs.org,
	Steven Rostedt <rostedt@goodmis.org>,
	Linus Torvalds <torvalds@linuxfoundation.org>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	Linux ARM <linux-arm-kernel@lists.infradead.org>,
	Chris Zankel <chris@zankel.net>, Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Nick Hu <nickhu@andestech.com>, Linux-MM <linux-mm@kvack.org>,
	Vineet Gupta <vgupta@synopsys.com>,
	linux-mips@vger.kernel.org, Arnd Bergmann <arnd@arndb.de>,
	Paul Mackerras <paulus@samba.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	"David S. Miller" <davem@davemloft.net>,
	Greentime Hu <green.hu@gmail.com>
Subject: [patch RFC 15/15] mm/highmem: Provide kmap_temporary*
Date: Sat, 19 Sep 2020 09:18:06 +0000	[thread overview]
Message-ID: <20200919092617.375720378@linutronix.de> (raw)
In-Reply-To: 20200919091751.011116649@linutronix.de

Now that the kmap atomic index is stored in task struct provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the map of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.

The kmap_temporary and iomap_temporary interfaces can be invoked from both
preemptible and atomic context.

A wholesale conversion of kmap_atomic to be fully preemptible is not
possible because some of the usage sites might rely on the preemption
disable for serialization or per CPUness. Needs to be done on a case by
case basis.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/include/asm/iomap.h |   16 ++++++++-
 arch/x86/mm/iomap_32.c       |    7 +---
 include/linux/highmem.h      |   70 +++++++++++++++++++++++++++++++++----------
 mm/highmem.c                 |   18 +++++------
 4 files changed, 80 insertions(+), 31 deletions(-)

--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -13,11 +13,23 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
+void __iomem *iomap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot);
+
+static inline void __iomem *iomap_atomic_pfn_prot(unsigned long pfn,
+						  pgprot_t prot)
+{
+	preempt_disable();
+	return iomap_temporary_pfn_prot(pfn, prot);
+}
+
+static inline void iounmap_temporary(void __iomem *vaddr)
+{
+	kunmap_temporary_indexed((void __force *)vaddr);
+}
 
 static inline void iounmap_atomic(void __iomem *vaddr)
 {
-	kunmap_atomic_indexed((void __force *)vaddr);
+	iounmap_temporary(vaddr);
 	preempt_enable();
 }
 
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -44,7 +44,7 @@ void iomap_free(resource_size_t base, un
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void __iomem *iomap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in
@@ -60,7 +60,6 @@ void __iomem *iomap_atomic_pfn_prot(unsi
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	preempt_disable();
-	return (void __force __iomem *)kmap_atomic_pfn_prot(pfn, prot);
+	return (void __force __iomem *)__kmap_temporary_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
+EXPORT_SYMBOL_GPL(iomap_temporary_pfn_prot);
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -35,9 +35,9 @@ static inline void invalidate_kernel_vma
  * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
  */
 #ifdef CONFIG_KMAP_ATOMIC_GENERIC
-void *kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
-void *kmap_atomic_page_prot(struct page *page, pgprot_t prot);
-void kunmap_atomic_indexed(void *vaddr);
+void *__kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot);
+void *__kmap_temporary_page_prot(struct page *page, pgprot_t prot);
+void kunmap_temporary_indexed(void *vaddr);
 void kmap_switch_temporary(struct task_struct *prev, struct task_struct *next);
 # ifndef ARCH_NEEDS_KMAP_HIGH_GET
 static inline void *arch_kmap_temporary_high_get(struct page *page)
@@ -95,16 +95,35 @@ static inline void kunmap(struct page *p
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_temporary_page_prot(struct page *page, pgprot_t prot)
 {
-	preempt_disable();
-	return kmap_atomic_page_prot(page, prot);
+	return __kmap_temporary_page_prot(page, prot);
 }
 
-static inline void *kmap_atomic_pfn(unsigned long pfn)
+static inline void *kmap_temporary_page(struct page *page)
+{
+	return kmap_temporary_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+	return __kmap_temporary_pfn_prot(pfn, prot);
+}
+
+static inline void *kmap_temporary_pfn(unsigned long pfn)
+{
+	return kmap_temporary_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_temporary(void *vaddr)
+{
+	kunmap_temporary_indexed(vaddr);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
-	return kmap_atomic_pfn_prot(pfn, kmap_prot);
+	return kmap_temporary_page_prot(page, prot);
 }
 
 static inline void *kmap_atomic(struct page *page)
@@ -112,9 +131,10 @@ static inline void *kmap_atomic(struct p
 	return kmap_atomic_prot(page, kmap_prot);
 }
 
-static inline void __kunmap_atomic(void *addr)
+static inline void *kmap_atomic_pfn(unsigned long pfn)
 {
-	kumap_atomic_indexed(addr);
+	preempt_disable();
+	return kmap_temporary_pfn_prot(pfn, kmap_prot);
 }
 
 /* declarations for linux/mm/highmem.c */
@@ -177,6 +197,22 @@ static inline void kunmap(struct page *p
 #endif
 }
 
+static inline void *kmap_temporary_page(struct page *page)
+{
+	pagefault_disable();
+	return page_address(page);
+}
+
+static inline void *kmap_temporary_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_temporary_page(page);
+}
+
+static inline void *kmap_temporary_pfn(unsigned long pfn)
+{
+	return kmap_temporary_page(pfn_to_page(pfn));
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -194,12 +230,8 @@ static inline void *kmap_atomic_pfn(unsi
 	return kmap_atomic(pfn_to_page(pfn));
 }
 
-static inline void __kunmap_atomic(void *addr)
+static inline void __kunmap_temporary(void *addr)
 {
-	/*
-	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
-	 * handles preemption
-	 */
 #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
 	kunmap_flush_on_unmap(addr);
 #endif
@@ -217,10 +249,16 @@ static inline void __kunmap_atomic(void
 #define kunmap_atomic(addr)						\
 	do {								\
 		BUILD_BUG_ON(__same_type((addr), struct page *));	\
-		__kunmap_atomic(addr);					\
+		__kunmap_temporary(addr);				\
 		preempt_enable();					\
 	} while (0)
 
+#define kunmap_temporary(addr)						\
+	do {								\
+		BUILD_BUG_ON(__same_type((addr), struct page *));	\
+		__kunmap_temporary(addr);				\
+	} while (0)
+
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -432,7 +432,7 @@ static pte_t *kmap_get_pte(void)
 	return __kmap_pte;
 }
 
-static void *__kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+static void *do_kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	pte_t pteval, *kmap_pte = kmap_get_pte();
 	unsigned long vaddr;
@@ -451,14 +451,14 @@ static void *__kmap_atomic_pfn_prot(unsi
 	return (void *)vaddr;
 }
 
-void *kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void *__kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	pagefault_disable();
-	return __kmap_atomic_pfn_prot(pfn, prot);
+	return do_kmap_temporary_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL(kmap_atomic_pfn_prot);
+EXPORT_SYMBOL(__kmap_temporary_pfn_prot);
 
-void *kmap_atomic_page_prot(struct page *page, pgprot_t prot)
+void *__kmap_temporary_page_prot(struct page *page, pgprot_t prot)
 {
 	void *kmap;
 
@@ -471,11 +471,11 @@ void *kmap_atomic_page_prot(struct page
 	if (kmap)
 		return kmap;
 
-	return __kmap_atomic_pfn_prot(page_to_pfn(page), prot);
+	return do_kmap_temporary_pfn_prot(page_to_pfn(page), prot);
 }
-EXPORT_SYMBOL(kmap_atomic_page_prot);
+EXPORT_SYMBOL(__kmap_temporary_page_prot);
 
-void kunmap_atomic_indexed(void *vaddr)
+void kunmap_temporary_indexed(void *vaddr)
 {
 	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
 	pte_t *kmap_pte = kmap_get_pte();
@@ -503,7 +503,7 @@ void kunmap_atomic_indexed(void *vaddr)
 	preempt_enable();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_indexed);
+EXPORT_SYMBOL(kunmap_temporary_indexed);
 
 void kmap_switch_temporary(struct task_struct *prev, struct task_struct *next)
 {

WARNING: multiple messages have this Message-ID (diff)
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Juri Lelli <juri.lelli@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
	dri-devel <dri-devel@lists.freedesktop.org>,
	Ben Segall <bsegall@google.com>,
	Max Filippov <jcmvbkbc@gmail.com>, Guo Ren <guoren@kernel.org>,
	sparclinux@vger.kernel.org, Vincent Chen <deanbo422@gmail.com>,
	Will Deacon <will@kernel.org>, Ard Biesheuvel <ardb@kernel.org>,
	linux-arch@vger.kernel.org,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Herbert Xu <herbert@gondor.apana.org.au>,
	x86@kernel.org, Russell King <linux@armlinux.org.uk>,
	linux-csky@vger.kernel.org, David Airlie <airlied@linux.ie>,
	Mel Gorman <mgorman@suse.de>,
	linux-snps-arc@lists.infradead.org,
	linux-xtensa@linux-xtensa.org, Paul McKenney <paulmck@kernel.org>,
	intel-gfx <intel-gfx@lists.freedesktop.org>,
	linuxppc-dev@lists.ozlabs.org,
	Steven Rostedt <rostedt@goodmis.org>,
	Linus Torvalds <torvalds@linuxfoundation.org>,
	Jani Nikula <jani.nikula@linux.intel.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	Linux ARM <linux-arm-kernel@lists.infradead.org>,
	Chris Zankel <chris@zankel.net>, Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Nick Hu <nickhu@andestech.com>, Linux-MM <linux-mm@kvack.org>,
	Vineet Gupta <vgupta@synopsys.com>,
	linux-mips@vger.kernel.org, Arnd Bergmann <arnd@arndb.de>,
	Daniel Vetter <daniel@ffwll.ch>,
	Paul Mackerras <paulus@samba.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	"David S. Miller" <davem@davemloft.net>,
	Greentime Hu <green.hu@gmail.com>
Subject: [patch RFC 15/15] mm/highmem: Provide kmap_temporary*
Date: Sat, 19 Sep 2020 11:18:06 +0200	[thread overview]
Message-ID: <20200919092617.375720378@linutronix.de> (raw)
In-Reply-To: 20200919091751.011116649@linutronix.de

Now that the kmap atomic index is stored in task struct provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the map of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.

The kmap_temporary and iomap_temporary interfaces can be invoked from both
preemptible and atomic context.

A wholesale conversion of kmap_atomic to be fully preemptible is not
possible because some of the usage sites might rely on the preemption
disable for serialization or per CPUness. Needs to be done on a case by
case basis.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/include/asm/iomap.h |   16 ++++++++-
 arch/x86/mm/iomap_32.c       |    7 +---
 include/linux/highmem.h      |   70 +++++++++++++++++++++++++++++++++----------
 mm/highmem.c                 |   18 +++++------
 4 files changed, 80 insertions(+), 31 deletions(-)

--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -13,11 +13,23 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
+void __iomem *iomap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot);
+
+static inline void __iomem *iomap_atomic_pfn_prot(unsigned long pfn,
+						  pgprot_t prot)
+{
+	preempt_disable();
+	return iomap_temporary_pfn_prot(pfn, prot);
+}
+
+static inline void iounmap_temporary(void __iomem *vaddr)
+{
+	kunmap_temporary_indexed((void __force *)vaddr);
+}
 
 static inline void iounmap_atomic(void __iomem *vaddr)
 {
-	kunmap_atomic_indexed((void __force *)vaddr);
+	iounmap_temporary(vaddr);
 	preempt_enable();
 }
 
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -44,7 +44,7 @@ void iomap_free(resource_size_t base, un
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void __iomem *iomap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in
@@ -60,7 +60,6 @@ void __iomem *iomap_atomic_pfn_prot(unsi
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	preempt_disable();
-	return (void __force __iomem *)kmap_atomic_pfn_prot(pfn, prot);
+	return (void __force __iomem *)__kmap_temporary_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
+EXPORT_SYMBOL_GPL(iomap_temporary_pfn_prot);
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -35,9 +35,9 @@ static inline void invalidate_kernel_vma
  * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
  */
 #ifdef CONFIG_KMAP_ATOMIC_GENERIC
-void *kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
-void *kmap_atomic_page_prot(struct page *page, pgprot_t prot);
-void kunmap_atomic_indexed(void *vaddr);
+void *__kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot);
+void *__kmap_temporary_page_prot(struct page *page, pgprot_t prot);
+void kunmap_temporary_indexed(void *vaddr);
 void kmap_switch_temporary(struct task_struct *prev, struct task_struct *next);
 # ifndef ARCH_NEEDS_KMAP_HIGH_GET
 static inline void *arch_kmap_temporary_high_get(struct page *page)
@@ -95,16 +95,35 @@ static inline void kunmap(struct page *p
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_temporary_page_prot(struct page *page, pgprot_t prot)
 {
-	preempt_disable();
-	return kmap_atomic_page_prot(page, prot);
+	return __kmap_temporary_page_prot(page, prot);
 }
 
-static inline void *kmap_atomic_pfn(unsigned long pfn)
+static inline void *kmap_temporary_page(struct page *page)
+{
+	return kmap_temporary_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+	return __kmap_temporary_pfn_prot(pfn, prot);
+}
+
+static inline void *kmap_temporary_pfn(unsigned long pfn)
+{
+	return kmap_temporary_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_temporary(void *vaddr)
+{
+	kunmap_temporary_indexed(vaddr);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
-	return kmap_atomic_pfn_prot(pfn, kmap_prot);
+	return kmap_temporary_page_prot(page, prot);
 }
 
 static inline void *kmap_atomic(struct page *page)
@@ -112,9 +131,10 @@ static inline void *kmap_atomic(struct p
 	return kmap_atomic_prot(page, kmap_prot);
 }
 
-static inline void __kunmap_atomic(void *addr)
+static inline void *kmap_atomic_pfn(unsigned long pfn)
 {
-	kumap_atomic_indexed(addr);
+	preempt_disable();
+	return kmap_temporary_pfn_prot(pfn, kmap_prot);
 }
 
 /* declarations for linux/mm/highmem.c */
@@ -177,6 +197,22 @@ static inline void kunmap(struct page *p
 #endif
 }
 
+static inline void *kmap_temporary_page(struct page *page)
+{
+	pagefault_disable();
+	return page_address(page);
+}
+
+static inline void *kmap_temporary_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_temporary_page(page);
+}
+
+static inline void *kmap_temporary_pfn(unsigned long pfn)
+{
+	return kmap_temporary_page(pfn_to_page(pfn));
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -194,12 +230,8 @@ static inline void *kmap_atomic_pfn(unsi
 	return kmap_atomic(pfn_to_page(pfn));
 }
 
-static inline void __kunmap_atomic(void *addr)
+static inline void __kunmap_temporary(void *addr)
 {
-	/*
-	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
-	 * handles preemption
-	 */
 #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
 	kunmap_flush_on_unmap(addr);
 #endif
@@ -217,10 +249,16 @@ static inline void __kunmap_atomic(void
 #define kunmap_atomic(addr)						\
 	do {								\
 		BUILD_BUG_ON(__same_type((addr), struct page *));	\
-		__kunmap_atomic(addr);					\
+		__kunmap_temporary(addr);				\
 		preempt_enable();					\
 	} while (0)
 
+#define kunmap_temporary(addr)						\
+	do {								\
+		BUILD_BUG_ON(__same_type((addr), struct page *));	\
+		__kunmap_temporary(addr);				\
+	} while (0)
+
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -432,7 +432,7 @@ static pte_t *kmap_get_pte(void)
 	return __kmap_pte;
 }
 
-static void *__kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+static void *do_kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	pte_t pteval, *kmap_pte = kmap_get_pte();
 	unsigned long vaddr;
@@ -451,14 +451,14 @@ static void *__kmap_atomic_pfn_prot(unsi
 	return (void *)vaddr;
 }
 
-void *kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void *__kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	pagefault_disable();
-	return __kmap_atomic_pfn_prot(pfn, prot);
+	return do_kmap_temporary_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL(kmap_atomic_pfn_prot);
+EXPORT_SYMBOL(__kmap_temporary_pfn_prot);
 
-void *kmap_atomic_page_prot(struct page *page, pgprot_t prot)
+void *__kmap_temporary_page_prot(struct page *page, pgprot_t prot)
 {
 	void *kmap;
 
@@ -471,11 +471,11 @@ void *kmap_atomic_page_prot(struct page
 	if (kmap)
 		return kmap;
 
-	return __kmap_atomic_pfn_prot(page_to_pfn(page), prot);
+	return do_kmap_temporary_pfn_prot(page_to_pfn(page), prot);
 }
-EXPORT_SYMBOL(kmap_atomic_page_prot);
+EXPORT_SYMBOL(__kmap_temporary_page_prot);
 
-void kunmap_atomic_indexed(void *vaddr)
+void kunmap_temporary_indexed(void *vaddr)
 {
 	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
 	pte_t *kmap_pte = kmap_get_pte();
@@ -503,7 +503,7 @@ void kunmap_atomic_indexed(void *vaddr)
 	preempt_enable();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_indexed);
+EXPORT_SYMBOL(kunmap_temporary_indexed);
 
 void kmap_switch_temporary(struct task_struct *prev, struct task_struct *next)
 {


WARNING: multiple messages have this Message-ID (diff)
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Juri Lelli <juri.lelli@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
	dri-devel <dri-devel@lists.freedesktop.org>,
	Ben Segall <bsegall@google.com>,
	Max Filippov <jcmvbkbc@gmail.com>, Guo Ren <guoren@kernel.org>,
	sparclinux@vger.kernel.org, Vincent Chen <deanbo422@gmail.com>,
	Will Deacon <will@kernel.org>, Ard Biesheuvel <ardb@kernel.org>,
	linux-arch@vger.kernel.org,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Herbert Xu <herbert@gondor.apana.org.au>,
	Michael Ellerman <mpe@ellerman.id.au>,
	x86@kernel.org, Russell King <linux@armlinux.org.uk>,
	linux-csky@vger.kernel.org, David Airlie <airlied@linux.ie>,
	Mel Gorman <mgorman@suse.de>,
	linux-snps-arc@lists.infradead.org,
	linux-xtensa@linux-xtensa.org, Paul McKenney <paulmck@kernel.org>,
	intel-gfx <intel-gfx@lists.freedesktop.org>,
	linuxppc-dev@lists.ozlabs.org,
	Steven Rostedt <rostedt@goodmis.org>,
	Linus Torvalds <torvalds@linuxfoundation.org>,
	Jani Nikula <jani.nikula@linux.intel.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	Linux ARM <linux-arm-kernel@lists.infradead.org>,
	Chris Zankel <chris@zankel.net>, Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Nick Hu <nickhu@andestech.com>, Linux-MM <linux-mm@kvack.org>,
	Vineet Gupta <vgupta@synopsys.com>,
	linux-mips@vger.kernel.org, Arnd Bergmann <arnd@arndb.de>,
	Daniel Vetter <daniel@ffwll.ch>,
	Paul Mackerras <paulus@samba.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	"David S. Miller" <davem@davemloft.net>,
	Greentime Hu <green.hu@gmail.com>
Subject: [patch RFC 15/15] mm/highmem: Provide kmap_temporary*
Date: Sat, 19 Sep 2020 11:18:06 +0200	[thread overview]
Message-ID: <20200919092617.375720378@linutronix.de> (raw)
In-Reply-To: 20200919091751.011116649@linutronix.de

Now that the kmap atomic index is stored in task struct provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the map of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.

The kmap_temporary and iomap_temporary interfaces can be invoked from both
preemptible and atomic context.

A wholesale conversion of kmap_atomic to be fully preemptible is not
possible because some of the usage sites might rely on the preemption
disable for serialization or per CPUness. Needs to be done on a case by
case basis.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/include/asm/iomap.h |   16 ++++++++-
 arch/x86/mm/iomap_32.c       |    7 +---
 include/linux/highmem.h      |   70 +++++++++++++++++++++++++++++++++----------
 mm/highmem.c                 |   18 +++++------
 4 files changed, 80 insertions(+), 31 deletions(-)

--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -13,11 +13,23 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
+void __iomem *iomap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot);
+
+static inline void __iomem *iomap_atomic_pfn_prot(unsigned long pfn,
+						  pgprot_t prot)
+{
+	preempt_disable();
+	return iomap_temporary_pfn_prot(pfn, prot);
+}
+
+static inline void iounmap_temporary(void __iomem *vaddr)
+{
+	kunmap_temporary_indexed((void __force *)vaddr);
+}
 
 static inline void iounmap_atomic(void __iomem *vaddr)
 {
-	kunmap_atomic_indexed((void __force *)vaddr);
+	iounmap_temporary(vaddr);
 	preempt_enable();
 }
 
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -44,7 +44,7 @@ void iomap_free(resource_size_t base, un
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void __iomem *iomap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in
@@ -60,7 +60,6 @@ void __iomem *iomap_atomic_pfn_prot(unsi
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	preempt_disable();
-	return (void __force __iomem *)kmap_atomic_pfn_prot(pfn, prot);
+	return (void __force __iomem *)__kmap_temporary_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
+EXPORT_SYMBOL_GPL(iomap_temporary_pfn_prot);
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -35,9 +35,9 @@ static inline void invalidate_kernel_vma
  * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
  */
 #ifdef CONFIG_KMAP_ATOMIC_GENERIC
-void *kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
-void *kmap_atomic_page_prot(struct page *page, pgprot_t prot);
-void kunmap_atomic_indexed(void *vaddr);
+void *__kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot);
+void *__kmap_temporary_page_prot(struct page *page, pgprot_t prot);
+void kunmap_temporary_indexed(void *vaddr);
 void kmap_switch_temporary(struct task_struct *prev, struct task_struct *next);
 # ifndef ARCH_NEEDS_KMAP_HIGH_GET
 static inline void *arch_kmap_temporary_high_get(struct page *page)
@@ -95,16 +95,35 @@ static inline void kunmap(struct page *p
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_temporary_page_prot(struct page *page, pgprot_t prot)
 {
-	preempt_disable();
-	return kmap_atomic_page_prot(page, prot);
+	return __kmap_temporary_page_prot(page, prot);
 }
 
-static inline void *kmap_atomic_pfn(unsigned long pfn)
+static inline void *kmap_temporary_page(struct page *page)
+{
+	return kmap_temporary_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+	return __kmap_temporary_pfn_prot(pfn, prot);
+}
+
+static inline void *kmap_temporary_pfn(unsigned long pfn)
+{
+	return kmap_temporary_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_temporary(void *vaddr)
+{
+	kunmap_temporary_indexed(vaddr);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
-	return kmap_atomic_pfn_prot(pfn, kmap_prot);
+	return kmap_temporary_page_prot(page, prot);
 }
 
 static inline void *kmap_atomic(struct page *page)
@@ -112,9 +131,10 @@ static inline void *kmap_atomic(struct p
 	return kmap_atomic_prot(page, kmap_prot);
 }
 
-static inline void __kunmap_atomic(void *addr)
+static inline void *kmap_atomic_pfn(unsigned long pfn)
 {
-	kumap_atomic_indexed(addr);
+	preempt_disable();
+	return kmap_temporary_pfn_prot(pfn, kmap_prot);
 }
 
 /* declarations for linux/mm/highmem.c */
@@ -177,6 +197,22 @@ static inline void kunmap(struct page *p
 #endif
 }
 
+static inline void *kmap_temporary_page(struct page *page)
+{
+	pagefault_disable();
+	return page_address(page);
+}
+
+static inline void *kmap_temporary_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_temporary_page(page);
+}
+
+static inline void *kmap_temporary_pfn(unsigned long pfn)
+{
+	return kmap_temporary_page(pfn_to_page(pfn));
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -194,12 +230,8 @@ static inline void *kmap_atomic_pfn(unsi
 	return kmap_atomic(pfn_to_page(pfn));
 }
 
-static inline void __kunmap_atomic(void *addr)
+static inline void __kunmap_temporary(void *addr)
 {
-	/*
-	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
-	 * handles preemption
-	 */
 #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
 	kunmap_flush_on_unmap(addr);
 #endif
@@ -217,10 +249,16 @@ static inline void __kunmap_atomic(void
 #define kunmap_atomic(addr)						\
 	do {								\
 		BUILD_BUG_ON(__same_type((addr), struct page *));	\
-		__kunmap_atomic(addr);					\
+		__kunmap_temporary(addr);				\
 		preempt_enable();					\
 	} while (0)
 
+#define kunmap_temporary(addr)						\
+	do {								\
+		BUILD_BUG_ON(__same_type((addr), struct page *));	\
+		__kunmap_temporary(addr);				\
+	} while (0)
+
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -432,7 +432,7 @@ static pte_t *kmap_get_pte(void)
 	return __kmap_pte;
 }
 
-static void *__kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+static void *do_kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	pte_t pteval, *kmap_pte = kmap_get_pte();
 	unsigned long vaddr;
@@ -451,14 +451,14 @@ static void *__kmap_atomic_pfn_prot(unsi
 	return (void *)vaddr;
 }
 
-void *kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void *__kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	pagefault_disable();
-	return __kmap_atomic_pfn_prot(pfn, prot);
+	return do_kmap_temporary_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL(kmap_atomic_pfn_prot);
+EXPORT_SYMBOL(__kmap_temporary_pfn_prot);
 
-void *kmap_atomic_page_prot(struct page *page, pgprot_t prot)
+void *__kmap_temporary_page_prot(struct page *page, pgprot_t prot)
 {
 	void *kmap;
 
@@ -471,11 +471,11 @@ void *kmap_atomic_page_prot(struct page
 	if (kmap)
 		return kmap;
 
-	return __kmap_atomic_pfn_prot(page_to_pfn(page), prot);
+	return do_kmap_temporary_pfn_prot(page_to_pfn(page), prot);
 }
-EXPORT_SYMBOL(kmap_atomic_page_prot);
+EXPORT_SYMBOL(__kmap_temporary_page_prot);
 
-void kunmap_atomic_indexed(void *vaddr)
+void kunmap_temporary_indexed(void *vaddr)
 {
 	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
 	pte_t *kmap_pte = kmap_get_pte();
@@ -503,7 +503,7 @@ void kunmap_atomic_indexed(void *vaddr)
 	preempt_enable();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_indexed);
+EXPORT_SYMBOL(kunmap_temporary_indexed);
 
 void kmap_switch_temporary(struct task_struct *prev, struct task_struct *next)
 {


_______________________________________________
linux-snps-arc mailing list
linux-snps-arc@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-snps-arc

WARNING: multiple messages have this Message-ID (diff)
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Juri Lelli <juri.lelli@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
	dri-devel <dri-devel@lists.freedesktop.org>,
	Ben Segall <bsegall@google.com>,
	Max Filippov <jcmvbkbc@gmail.com>, Guo Ren <guoren@kernel.org>,
	sparclinux@vger.kernel.org, Vincent Chen <deanbo422@gmail.com>,
	Will Deacon <will@kernel.org>, Ard Biesheuvel <ardb@kernel.org>,
	linux-arch@vger.kernel.org,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Herbert Xu <herbert@gondor.apana.org.au>,
	Michael Ellerman <mpe@ellerman.id.au>,
	x86@kernel.org, Russell King <linux@armlinux.org.uk>,
	linux-csky@vger.kernel.org, David Airlie <airlied@linux.ie>,
	Mel Gorman <mgorman@suse.de>,
	linux-snps-arc@lists.infradead.org,
	linux-xtensa@linux-xtensa.org, Paul McKenney <paulmck@kernel.org>,
	intel-gfx <intel-gfx@lists.freedesktop.org>,
	linuxppc-dev@lists.ozlabs.org,
	Steven Rostedt <rostedt@goodmis.org>,
	Linus Torvalds <torvalds@linuxfoundation.org>,
	Jani Nikula <jani.nikula@linux.intel.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	Linux ARM <linux-arm-kernel@lists.infradead.org>,
	Chris Zankel <chris@zankel.net>, Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Nick Hu <nickhu@andestech.com>, Linux-MM <linux-mm@kvack.org>,
	Vineet Gupta <vgupta@synopsys.com>,
	linux-mips@vger.kernel.org, Arnd Bergmann <arnd@arndb.de>,
	Daniel Vetter <daniel@ffwll.ch>,
	Paul Mackerras <paulus@samba.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	"David S. Miller" <davem@davemloft.net>,
	Greentime Hu <green.hu@gmail.com>
Subject: [patch RFC 15/15] mm/highmem: Provide kmap_temporary*
Date: Sat, 19 Sep 2020 11:18:06 +0200	[thread overview]
Message-ID: <20200919092617.375720378@linutronix.de> (raw)
In-Reply-To: 20200919091751.011116649@linutronix.de

Now that the kmap atomic index is stored in task struct provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the map of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.

The kmap_temporary and iomap_temporary interfaces can be invoked from both
preemptible and atomic context.

A wholesale conversion of kmap_atomic to be fully preemptible is not
possible because some of the usage sites might rely on the preemption
disable for serialization or per CPUness. Needs to be done on a case by
case basis.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/include/asm/iomap.h |   16 ++++++++-
 arch/x86/mm/iomap_32.c       |    7 +---
 include/linux/highmem.h      |   70 +++++++++++++++++++++++++++++++++----------
 mm/highmem.c                 |   18 +++++------
 4 files changed, 80 insertions(+), 31 deletions(-)

--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -13,11 +13,23 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
+void __iomem *iomap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot);
+
+static inline void __iomem *iomap_atomic_pfn_prot(unsigned long pfn,
+						  pgprot_t prot)
+{
+	preempt_disable();
+	return iomap_temporary_pfn_prot(pfn, prot);
+}
+
+static inline void iounmap_temporary(void __iomem *vaddr)
+{
+	kunmap_temporary_indexed((void __force *)vaddr);
+}
 
 static inline void iounmap_atomic(void __iomem *vaddr)
 {
-	kunmap_atomic_indexed((void __force *)vaddr);
+	iounmap_temporary(vaddr);
 	preempt_enable();
 }
 
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -44,7 +44,7 @@ void iomap_free(resource_size_t base, un
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void __iomem *iomap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in
@@ -60,7 +60,6 @@ void __iomem *iomap_atomic_pfn_prot(unsi
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	preempt_disable();
-	return (void __force __iomem *)kmap_atomic_pfn_prot(pfn, prot);
+	return (void __force __iomem *)__kmap_temporary_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
+EXPORT_SYMBOL_GPL(iomap_temporary_pfn_prot);
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -35,9 +35,9 @@ static inline void invalidate_kernel_vma
  * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
  */
 #ifdef CONFIG_KMAP_ATOMIC_GENERIC
-void *kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
-void *kmap_atomic_page_prot(struct page *page, pgprot_t prot);
-void kunmap_atomic_indexed(void *vaddr);
+void *__kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot);
+void *__kmap_temporary_page_prot(struct page *page, pgprot_t prot);
+void kunmap_temporary_indexed(void *vaddr);
 void kmap_switch_temporary(struct task_struct *prev, struct task_struct *next);
 # ifndef ARCH_NEEDS_KMAP_HIGH_GET
 static inline void *arch_kmap_temporary_high_get(struct page *page)
@@ -95,16 +95,35 @@ static inline void kunmap(struct page *p
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_temporary_page_prot(struct page *page, pgprot_t prot)
 {
-	preempt_disable();
-	return kmap_atomic_page_prot(page, prot);
+	return __kmap_temporary_page_prot(page, prot);
 }
 
-static inline void *kmap_atomic_pfn(unsigned long pfn)
+static inline void *kmap_temporary_page(struct page *page)
+{
+	return kmap_temporary_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+	return __kmap_temporary_pfn_prot(pfn, prot);
+}
+
+static inline void *kmap_temporary_pfn(unsigned long pfn)
+{
+	return kmap_temporary_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_temporary(void *vaddr)
+{
+	kunmap_temporary_indexed(vaddr);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
-	return kmap_atomic_pfn_prot(pfn, kmap_prot);
+	return kmap_temporary_page_prot(page, prot);
 }
 
 static inline void *kmap_atomic(struct page *page)
@@ -112,9 +131,10 @@ static inline void *kmap_atomic(struct p
 	return kmap_atomic_prot(page, kmap_prot);
 }
 
-static inline void __kunmap_atomic(void *addr)
+static inline void *kmap_atomic_pfn(unsigned long pfn)
 {
-	kumap_atomic_indexed(addr);
+	preempt_disable();
+	return kmap_temporary_pfn_prot(pfn, kmap_prot);
 }
 
 /* declarations for linux/mm/highmem.c */
@@ -177,6 +197,22 @@ static inline void kunmap(struct page *p
 #endif
 }
 
+static inline void *kmap_temporary_page(struct page *page)
+{
+	pagefault_disable();
+	return page_address(page);
+}
+
+static inline void *kmap_temporary_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_temporary_page(page);
+}
+
+static inline void *kmap_temporary_pfn(unsigned long pfn)
+{
+	return kmap_temporary_page(pfn_to_page(pfn));
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -194,12 +230,8 @@ static inline void *kmap_atomic_pfn(unsi
 	return kmap_atomic(pfn_to_page(pfn));
 }
 
-static inline void __kunmap_atomic(void *addr)
+static inline void __kunmap_temporary(void *addr)
 {
-	/*
-	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
-	 * handles preemption
-	 */
 #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
 	kunmap_flush_on_unmap(addr);
 #endif
@@ -217,10 +249,16 @@ static inline void __kunmap_atomic(void
 #define kunmap_atomic(addr)						\
 	do {								\
 		BUILD_BUG_ON(__same_type((addr), struct page *));	\
-		__kunmap_atomic(addr);					\
+		__kunmap_temporary(addr);				\
 		preempt_enable();					\
 	} while (0)
 
+#define kunmap_temporary(addr)						\
+	do {								\
+		BUILD_BUG_ON(__same_type((addr), struct page *));	\
+		__kunmap_temporary(addr);				\
+	} while (0)
+
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -432,7 +432,7 @@ static pte_t *kmap_get_pte(void)
 	return __kmap_pte;
 }
 
-static void *__kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+static void *do_kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	pte_t pteval, *kmap_pte = kmap_get_pte();
 	unsigned long vaddr;
@@ -451,14 +451,14 @@ static void *__kmap_atomic_pfn_prot(unsi
 	return (void *)vaddr;
 }
 
-void *kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void *__kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	pagefault_disable();
-	return __kmap_atomic_pfn_prot(pfn, prot);
+	return do_kmap_temporary_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL(kmap_atomic_pfn_prot);
+EXPORT_SYMBOL(__kmap_temporary_pfn_prot);
 
-void *kmap_atomic_page_prot(struct page *page, pgprot_t prot)
+void *__kmap_temporary_page_prot(struct page *page, pgprot_t prot)
 {
 	void *kmap;
 
@@ -471,11 +471,11 @@ void *kmap_atomic_page_prot(struct page
 	if (kmap)
 		return kmap;
 
-	return __kmap_atomic_pfn_prot(page_to_pfn(page), prot);
+	return do_kmap_temporary_pfn_prot(page_to_pfn(page), prot);
 }
-EXPORT_SYMBOL(kmap_atomic_page_prot);
+EXPORT_SYMBOL(__kmap_temporary_page_prot);
 
-void kunmap_atomic_indexed(void *vaddr)
+void kunmap_temporary_indexed(void *vaddr)
 {
 	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
 	pte_t *kmap_pte = kmap_get_pte();
@@ -503,7 +503,7 @@ void kunmap_atomic_indexed(void *vaddr)
 	preempt_enable();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_indexed);
+EXPORT_SYMBOL(kunmap_temporary_indexed);
 
 void kmap_switch_temporary(struct task_struct *prev, struct task_struct *next)
 {


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

WARNING: multiple messages have this Message-ID (diff)
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Juri Lelli <juri.lelli@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	dri-devel <dri-devel@lists.freedesktop.org>,
	Ben Segall <bsegall@google.com>,
	Max Filippov <jcmvbkbc@gmail.com>, Guo Ren <guoren@kernel.org>,
	sparclinux@vger.kernel.org, Vincent Chen <deanbo422@gmail.com>,
	Will Deacon <will@kernel.org>, Ard Biesheuvel <ardb@kernel.org>,
	linux-arch@vger.kernel.org,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Herbert Xu <herbert@gondor.apana.org.au>,
	Michael Ellerman <mpe@ellerman.id.au>,
	x86@kernel.org, Russell King <linux@armlinux.org.uk>,
	linux-csky@vger.kernel.org, David Airlie <airlied@linux.ie>,
	Mel Gorman <mgorman@suse.de>,
	linux-snps-arc@lists.infradead.org,
	linux-xtensa@linux-xtensa.org, Paul McKenney <paulmck@kernel.org>,
	intel-gfx <intel-gfx@lists.freedesktop.org>,
	linuxppc-dev@lists.ozlabs.org,
	Steven Rostedt <rostedt@goodmis.org>,
	Linus Torvalds <torvalds@linuxfoundation.org>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	Linux ARM <linux-arm-kernel@lists.infradead.org>,
	Chris Zankel <chris@zankel.net>, Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Nick Hu <nickhu@andestech.com>, Linux-MM <linux-mm@kvack.org>,
	Vineet Gupta <vgupta@synopsys.com>,
	linux-mips@vger.kernel.org, Arnd Bergmann <arnd@arndb.de>,
	Paul Mackerras <paulus@samba.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	"David S. Miller" <davem@davemloft.net>,
	Greentime Hu <green.hu@gmail.com>
Subject: [patch RFC 15/15] mm/highmem: Provide kmap_temporary*
Date: Sat, 19 Sep 2020 11:18:06 +0200	[thread overview]
Message-ID: <20200919092617.375720378@linutronix.de> (raw)
In-Reply-To: 20200919091751.011116649@linutronix.de

Now that the kmap atomic index is stored in task struct provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the map of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.

The kmap_temporary and iomap_temporary interfaces can be invoked from both
preemptible and atomic context.

A wholesale conversion of kmap_atomic to be fully preemptible is not
possible because some of the usage sites might rely on the preemption
disable for serialization or per CPUness. Needs to be done on a case by
case basis.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/include/asm/iomap.h |   16 ++++++++-
 arch/x86/mm/iomap_32.c       |    7 +---
 include/linux/highmem.h      |   70 +++++++++++++++++++++++++++++++++----------
 mm/highmem.c                 |   18 +++++------
 4 files changed, 80 insertions(+), 31 deletions(-)

--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -13,11 +13,23 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
+void __iomem *iomap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot);
+
+static inline void __iomem *iomap_atomic_pfn_prot(unsigned long pfn,
+						  pgprot_t prot)
+{
+	preempt_disable();
+	return iomap_temporary_pfn_prot(pfn, prot);
+}
+
+static inline void iounmap_temporary(void __iomem *vaddr)
+{
+	kunmap_temporary_indexed((void __force *)vaddr);
+}
 
 static inline void iounmap_atomic(void __iomem *vaddr)
 {
-	kunmap_atomic_indexed((void __force *)vaddr);
+	iounmap_temporary(vaddr);
 	preempt_enable();
 }
 
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -44,7 +44,7 @@ void iomap_free(resource_size_t base, un
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void __iomem *iomap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in
@@ -60,7 +60,6 @@ void __iomem *iomap_atomic_pfn_prot(unsi
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	preempt_disable();
-	return (void __force __iomem *)kmap_atomic_pfn_prot(pfn, prot);
+	return (void __force __iomem *)__kmap_temporary_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
+EXPORT_SYMBOL_GPL(iomap_temporary_pfn_prot);
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -35,9 +35,9 @@ static inline void invalidate_kernel_vma
  * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
  */
 #ifdef CONFIG_KMAP_ATOMIC_GENERIC
-void *kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
-void *kmap_atomic_page_prot(struct page *page, pgprot_t prot);
-void kunmap_atomic_indexed(void *vaddr);
+void *__kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot);
+void *__kmap_temporary_page_prot(struct page *page, pgprot_t prot);
+void kunmap_temporary_indexed(void *vaddr);
 void kmap_switch_temporary(struct task_struct *prev, struct task_struct *next);
 # ifndef ARCH_NEEDS_KMAP_HIGH_GET
 static inline void *arch_kmap_temporary_high_get(struct page *page)
@@ -95,16 +95,35 @@ static inline void kunmap(struct page *p
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_temporary_page_prot(struct page *page, pgprot_t prot)
 {
-	preempt_disable();
-	return kmap_atomic_page_prot(page, prot);
+	return __kmap_temporary_page_prot(page, prot);
 }
 
-static inline void *kmap_atomic_pfn(unsigned long pfn)
+static inline void *kmap_temporary_page(struct page *page)
+{
+	return kmap_temporary_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+	return __kmap_temporary_pfn_prot(pfn, prot);
+}
+
+static inline void *kmap_temporary_pfn(unsigned long pfn)
+{
+	return kmap_temporary_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_temporary(void *vaddr)
+{
+	kunmap_temporary_indexed(vaddr);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
-	return kmap_atomic_pfn_prot(pfn, kmap_prot);
+	return kmap_temporary_page_prot(page, prot);
 }
 
 static inline void *kmap_atomic(struct page *page)
@@ -112,9 +131,10 @@ static inline void *kmap_atomic(struct p
 	return kmap_atomic_prot(page, kmap_prot);
 }
 
-static inline void __kunmap_atomic(void *addr)
+static inline void *kmap_atomic_pfn(unsigned long pfn)
 {
-	kunmap_atomic_indexed(addr);
+	preempt_disable();
+	return kmap_temporary_pfn_prot(pfn, kmap_prot);
 }
 
 /* declarations for linux/mm/highmem.c */
@@ -177,6 +197,22 @@ static inline void kunmap(struct page *p
 #endif
 }
 
+static inline void *kmap_temporary_page(struct page *page)
+{
+	pagefault_disable();
+	return page_address(page);
+}
+
+static inline void *kmap_temporary_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_temporary_page(page);
+}
+
+static inline void *kmap_temporary_pfn(unsigned long pfn)
+{
+	return kmap_temporary_page(pfn_to_page(pfn));
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -194,12 +230,8 @@ static inline void *kmap_atomic_pfn(unsi
 	return kmap_atomic(pfn_to_page(pfn));
 }
 
-static inline void __kunmap_atomic(void *addr)
+static inline void __kunmap_temporary(void *addr)
 {
-	/*
-	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
-	 * handles preemption
-	 */
 #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
 	kunmap_flush_on_unmap(addr);
 #endif
@@ -217,10 +249,16 @@ static inline void __kunmap_atomic(void
 #define kunmap_atomic(addr)						\
 	do {								\
 		BUILD_BUG_ON(__same_type((addr), struct page *));	\
-		__kunmap_atomic(addr);					\
+		__kunmap_temporary(addr);				\
 		preempt_enable();					\
 	} while (0)
 
+#define kunmap_temporary(addr)						\
+	do {								\
+		BUILD_BUG_ON(__same_type((addr), struct page *));	\
+		__kunmap_temporary(addr);				\
+	} while (0)
+
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -432,7 +432,7 @@ static pte_t *kmap_get_pte(void)
 	return __kmap_pte;
 }
 
-static void *__kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+static void *do_kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	pte_t pteval, *kmap_pte = kmap_get_pte();
 	unsigned long vaddr;
@@ -451,14 +451,14 @@ static void *__kmap_atomic_pfn_prot(unsi
 	return (void *)vaddr;
 }
 
-void *kmap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void *__kmap_temporary_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	pagefault_disable();
-	return __kmap_atomic_pfn_prot(pfn, prot);
+	return do_kmap_temporary_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL(kmap_atomic_pfn_prot);
+EXPORT_SYMBOL(__kmap_temporary_pfn_prot);
 
-void *kmap_atomic_page_prot(struct page *page, pgprot_t prot)
+void *__kmap_temporary_page_prot(struct page *page, pgprot_t prot)
 {
 	void *kmap;
 
@@ -471,11 +471,11 @@ void *kmap_atomic_page_prot(struct page
 	if (kmap)
 		return kmap;
 
-	return __kmap_atomic_pfn_prot(page_to_pfn(page), prot);
+	return do_kmap_temporary_pfn_prot(page_to_pfn(page), prot);
 }
-EXPORT_SYMBOL(kmap_atomic_page_prot);
+EXPORT_SYMBOL(__kmap_temporary_page_prot);
 
-void kunmap_atomic_indexed(void *vaddr)
+void kunmap_temporary_indexed(void *vaddr)
 {
 	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
 	pte_t *kmap_pte = kmap_get_pte();
@@ -503,7 +503,7 @@ void kunmap_atomic_indexed(void *vaddr)
 	preempt_enable();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_indexed);
+EXPORT_SYMBOL(kunmap_temporary_indexed);
 
 void kmap_switch_temporary(struct task_struct *prev, struct task_struct *next)
 {

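To make the difference concrete, here is a minimal, illustration-only sketch of a caller of the new interfaces. It is not part of the patch; copy_highpage_preemptible() and copy_highpage_atomic() are made-up names, and only the kmap/kunmap calls and their semantics come from the patch above:

/*
 * Illustration only, not part of the patch. Assumes the semantics
 * introduced above: kmap_temporary_page() disables pagefaults but not
 * preemption, so the task may be scheduled out while the map is held;
 * kmap_switch_temporary() re-installs the maps on switch-in.
 */
static void copy_highpage_preemptible(struct page *dst, struct page *src)
{
	char *vfrom = kmap_temporary_page(src);
	char *vto = kmap_temporary_page(dst);

	copy_page(vto, vfrom);		/* preemption is possible here */

	kunmap_temporary(vto);		/* release in reverse map order */
	kunmap_temporary(vfrom);
}

static void copy_highpage_atomic(struct page *dst, struct page *src)
{
	char *vfrom = kmap_atomic(src);	/* additionally disables preemption */
	char *vto = kmap_atomic(dst);

	copy_page(vto, vfrom);		/* no preemption, no migration */

	kunmap_atomic(vto);		/* re-enables preemption */
	kunmap_atomic(vfrom);
}

Since the mapping slots are indexed per task, the temporary maps presumably have to be released in LIFO order, just like nested kmap_atomic sections. The iomap_temporary_pfn_prot()/iounmap_temporary() pair on 32bit x86 follows the same pattern for pfn-based io mappings.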

Thread overview: 428+ messages
2020-09-19  9:17 [patch RFC 00/15] mm/highmem: Provide a preemptible variant of kmap_atomic & friends Thomas Gleixner
2020-09-19  9:17 ` [patch RFC 01/15] mm/highmem: Un-EXPORT __kmap_atomic_idx() Thomas Gleixner
2020-09-21  6:23   ` Christoph Hellwig
2020-09-19  9:17 ` [patch RFC 02/15] highmem: Provide generic variant of kmap_atomic* Thomas Gleixner
2020-09-21  6:28   ` Christoph Hellwig
2020-09-19  9:17 ` [patch RFC 03/15] x86/mm/highmem: Use generic kmap atomic implementation Thomas Gleixner
2020-09-19  9:17 ` [patch RFC 04/15] arc/mm/highmem: " Thomas Gleixner
2020-09-19  9:17 ` [patch RFC 05/15] ARM: highmem: Switch to generic kmap atomic Thomas Gleixner
2020-09-19  9:17 ` [patch RFC 06/15] csky/mm/highmem: " Thomas Gleixner
2020-09-23  0:05   ` Guo Ren
2020-09-19  9:17 ` [patch RFC 07/15] microblaze/mm/highmem: " Thomas Gleixner
2020-09-19  9:17 ` [patch RFC 08/15] mips/mm/highmem: " Thomas Gleixner
2020-09-19  9:18 ` [patch RFC 09/15] nds32/mm/highmem: " Thomas Gleixner
2020-09-19  9:18 ` [patch RFC 10/15] powerpc/mm/highmem: " Thomas Gleixner
2020-09-19  9:18 ` [patch RFC 11/15] sparc/mm/highmem: " Thomas Gleixner
2020-09-19  9:18 ` [patch RFC 12/15] xtensa/mm/highmem: " Thomas Gleixner
2020-09-19  9:18 ` [patch RFC 13/15] mm/highmem: Remove the old kmap_atomic cruft Thomas Gleixner
2020-09-19  9:18 ` [patch RFC 14/15] sched: highmem: Store temporary kmaps in task struct Thomas Gleixner
2020-09-19  9:18 ` Thomas Gleixner [this message]
2020-09-19 10:03 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for mm/highmem: Provide a preemptible variant of kmap_atomic & friends Patchwork
2020-09-19 10:05 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2020-09-19 10:24 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2020-09-19 10:35 ` [patch RFC 00/15] " Daniel Vetter
2020-09-19 10:37   ` Daniel Vetter
2020-09-20  6:23     ` Thomas Gleixner
2020-09-20  8:23       ` Daniel Vetter
2020-09-20 17:24         ` Thomas Gleixner
2020-09-19 11:34 ` [Intel-gfx] ✓ Fi.CI.IGT: success for " Patchwork
2020-09-19 17:18 ` [patch RFC 00/15] " Linus Torvalds
2020-09-19 17:39   ` Matthew Wilcox
2020-09-19 19:13     ` Linus Torvalds
2020-09-21 19:58     ` Ira Weiny
2020-09-20  6:41   ` Thomas Gleixner
2020-09-20  8:49     ` Thomas Gleixner
2020-09-20 16:57       ` Linus Torvalds
2020-09-20 17:40         ` Thomas Gleixner
2020-09-20 17:42           ` Linus Torvalds
2020-09-20 17:58             ` Linus Torvalds
2020-09-21  7:39             ` Thomas Gleixner
2020-09-21 16:24               ` Linus Torvalds
2020-09-21 19:27                 ` Thomas Gleixner
2020-09-23  8:40                   ` peterz
2020-09-23 13:35                     ` Thomas Gleixner
2020-09-23 15:52                     ` Steven Rostedt
2020-09-23 20:55                       ` Thomas Gleixner
2020-09-23 21:12                         ` Steven Rostedt
2020-09-24  6:57                           ` Thomas Gleixner
2020-09-24 12:32                             ` Steven Rostedt
2020-09-24 12:42                               ` Peter Zijlstra
2020-09-24 13:51                                 ` Steven Rostedt
2020-09-24 13:58                                   ` Peter Zijlstra
2020-09-24 17:55                               ` Thomas Gleixner
2020-09-24 18:58                                 ` Steven Rostedt
2020-09-24  8:27                       ` peterz
2020-09-24 19:36                         ` Daniel Bristot de Oliveira
2020-09-23 10:19                   ` peterz
2020-09-23 12:33                     ` Thomas Gleixner
2020-09-23 14:33                   ` Thomas Gleixner
