From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Linus Torvalds <torvalds@linuxfoundation.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Paul McKenney <paulmck@kernel.org>,
	Christoph Hellwig <hch@lst.de>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Andrew Morton <akpm@linux-foundation.org>,
	linux-mm@kvack.org, Alexander Viro <viro@zeniv.linux.org.uk>,
	Benjamin LaHaise <bcrl@kvack.org>,
	linux-fsdevel@vger.kernel.org, linux-aio@kvack.org,
	Chris Mason <clm@fb.com>, Josef Bacik <josef@toxicpanda.com>,
	David Sterba <dsterba@suse.com>,
	linux-btrfs@vger.kernel.org, x86@kernel.org,
	Vineet Gupta <vgupta@synopsys.com>,
	linux-snps-arc@lists.infradead.org,
	Russell King <linux@armlinux.org.uk>,
	Arnd Bergmann <arnd@arndb.de>,
	linux-arm-kernel@lists.infradead.org, linux-csky@vger.kernel.org,
	Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	linux-mips@vger.kernel.org, Nick Hu <nickhu@andestech.com>,
	Greentime Hu <green.hu@gmail.com>,
	Vincent Chen <deanbo422@gmail.com>,
	Michael Ellerman <mpe@ellerman.id.au>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Paul Mackerras <paulus@samba.org>,
	linuxppc-dev@lists.ozlabs.org,
	"David S. Miller" <davem@davemloft.net>,
	sparclinux@vger.kernel.org, Chris Zankel <chris@zankel.net>,
	Max Filippov <jcmvbkbc@gmail.com>,
	linux-xtensa@linux-xtensa.org, Ingo Molnar <mingo@kernel.org>,
	Juri Lelli <juri.lelli@redhat.com>,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Ben Segall <bsegall@google.com>, Mel Gorman <mgorman@suse.de>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	Christian Koenig <christian.koenig@amd.com>,
	Huang Rui <ray.huang@amd.com>, David Airlie <airlied@linux.ie>,
	Daniel Vetter <daniel@ffwll.ch>,
	dri-devel@lists.freedesktop.org,
	VMware Graphics <linux-graphics-maintainer@vmware.com>,
	Roland Scheidegger <sroland@vmware.com>,
	Dave Airlie <airlied@redhat.com>,
	Gerd Hoffmann <kraxel@redhat.com>,
	virtualization@lists.linux-foundation.org,
	spice-devel@lists.freedesktop.org,
	Ben Skeggs <bskeggs@redhat.com>,
	nouveau@lists.freedesktop.org,
	Jani Nikula <jani.nikula@linux.intel.com>,
	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	intel-gfx@lists.freedesktop.org
Subject: [patch V3 25/37] mm/highmem: Provide kmap_local*
Date: Tue, 03 Nov 2020 10:27:37 +0100	[thread overview]
Message-ID: <20201103095859.132846644@linutronix.de> (raw)
In-Reply-To: 20201103092712.714480842@linutronix.de

Now that the kmap atomic index is stored in the task struct, provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the maps of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.

The kmap_local.*() functions can be invoked from both preemptible and
atomic context. kmap_local sections disable migration to keep the resulting
virtual mapping address stable across preemption, but they disable neither
pagefaults nor preemption.
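
A minimal usage sketch (hypothetical caller, not part of this patch),
showing the new interface from preemptible context:

	static void copy_one_page(struct page *dst, struct page *src)
	{
		void *vdst = kmap_local_page(dst);
		void *vsrc = kmap_local_page(src);

		/*
		 * May be preempted here; the mapping addresses stay valid
		 * because migration is disabled for the mapped section.
		 */
		memcpy(vdst, vsrc, PAGE_SIZE);

		/* Unmap strictly in reverse order of the map operations */
		kunmap_local(vsrc);
		kunmap_local(vdst);
	}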

A wholesale conversion of kmap_atomic() to be fully preemptible is not
possible because some of the usage sites might rely on the preemption
disable for serialization or on the implicit pagefault disable. This needs
to be done on a case-by-case basis.
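
As an illustration only (not part of this patch), a call site which relied
on the implicit pagefault disable could be converted along these lines,
with do_something() being a hypothetical helper:

	addr = kmap_local_page(page);
	pagefault_disable();
	ret = do_something(addr);
	pagefault_enable();
	kunmap_local(addr);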

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V3: Move migrate disable into the actual highmem mapping code so it only
    affects real highmem mappings.
   
V2: Make it more consistent and add commentary
---
 include/linux/highmem-internal.h |   48 +++++++++++++++++++++++++++++++++++++++
 include/linux/highmem.h          |   43 +++++++++++++++++++++-------------
 mm/highmem.c                     |    6 ++++
 3 files changed, 81 insertions(+), 16 deletions(-)

--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -69,6 +69,26 @@ static inline void kmap_flush_unused(voi
 	__kmap_flush_unused();
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(void *vaddr)
+{
+	kunmap_local_indexed(vaddr);
+}
+
 static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
@@ -141,6 +161,28 @@ static inline void kunmap(struct page *p
 #endif
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return page_address(page);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(addr);
+#endif
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -182,4 +224,10 @@ do {								\
 	__kunmap_atomic(__addr);				\
 } while (0)
 
+#define kunmap_local(__addr)					\
+do {								\
+	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
+	__kunmap_local(__addr);					\
+} while (0)
+
 #endif
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -60,24 +60,22 @@ static inline struct page *kmap_to_page(
 static inline void kmap_flush_unused(void);
 
 /**
- * kmap_atomic - Atomically map a page for temporary usage
+ * kmap_local_page - Map a page for temporary usage
  * @page:	Pointer to the page to be mapped
  *
  * Returns: The virtual address of the mapping
  *
- * Side effect: On return pagefaults and preemption are disabled.
- *
  * Can be invoked from any context.
  *
  * Requires careful handling when nesting multiple mappings because the map
  * management is stack based. The unmap has to be in the reverse order of
  * the map operation:
  *
- * addr1 = kmap_atomic(page1);
- * addr2 = kmap_atomic(page2);
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
  * ...
- * kunmap_atomic(addr2);
- * kunmap_atomic(addr1);
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
  *
  * Unmapping addr1 before addr2 is invalid and causes malfunction.
  *
@@ -88,10 +86,26 @@ static inline void kmap_flush_unused(voi
  * virtual address of the direct mapping. Only real highmem pages are
  * temporarily mapped.
  *
- * While it is significantly faster than kmap() it comes with restrictions
- * about the pointer validity and the side effects of disabling page faults
- * and preemption. Use it only when absolutely necessary, e.g. from non
- * preemptible contexts.
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
+
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page:	Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Effectively a wrapper around kmap_local_page() which disables pagefaults
+ * and preemption.
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
  */
 static inline void *kmap_atomic(struct page *page);
 
@@ -101,12 +115,9 @@ static inline void *kmap_atomic(struct p
  *
  * Counterpart to kmap_atomic().
  *
- * Undoes the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * Effectively a wrapper around kunmap_local() which additionally undoes
+ * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
  * preemption.
- *
- * Other than that a NOOP for CONFIG_HIGHMEM=n and for mappings of pages
- * in the low memory area. For real highmen pages the mapping which was
- * established with kmap_atomic() is destroyed.
  */
 
 /* Highmem related interfaces for management code */
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -450,6 +450,11 @@ void *__kmap_local_pfn_prot(unsigned lon
 	unsigned long vaddr;
 	int idx;
 
+	/*
+	 * Disable migration so resulting virtual address is stable
+	 * across preemption.
+	 */
+	migrate_disable();
 	preempt_disable();
 	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -505,6 +510,7 @@ void kunmap_local_indexed(void *vaddr)
 	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
 	kmap_local_idx_pop();
 	preempt_enable();
+	migrate_enable();
 }
 EXPORT_SYMBOL(kunmap_local_indexed);
 


WARNING: multiple messages have this Message-ID (diff)
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Juri Lelli <juri.lelli@redhat.com>,
	linux-aio@kvack.org, Peter Zijlstra <peterz@infradead.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	dri-devel@lists.freedesktop.org,
	virtualization@lists.linux-foundation.org,
	Ben Segall <bsegall@google.com>, Chris Mason <clm@fb.com>,
	Huang Rui <ray.huang@amd.com>, Paul Mackerras <paulus@samba.org>,
	Gerd Hoffmann <kraxel@redhat.com>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	sparclinux@vger.kernel.org, Vincent Chen <deanbo422@gmail.com>,
	Christoph Hellwig <hch@lst.de>,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Paul McKenney <paulmck@kernel.org>,
	Max Filippov <jcmvbkbc@gmail.com>,
	Michael Ellerman <mpe@ellerman.id.au>,
	x86@kernel.org, Russell King <linux@armlinux.org.uk>,
	linux-csky@vger.kernel.org, Ingo Molnar <mingo@kernel.org>,
	David Airlie <airlied@linux.ie>,
	VMware Graphics <linux-graphics-maintainer@vmware.com>,
	Mel Gorman <mgorman@suse.de>,
	nouveau@lists.freedesktop.org, Dave Airlie <airlied@redhat.com>,
	linux-snps-arc@lists.infradead.org,
	Ben Skeggs <bskeggs@redhat.com>,
	linux-xtensa@linux-xtensa.org, Arnd Bergmann <arnd@arndb.de>,
	intel-gfx@lists.freedesktop.org,
	Roland Scheidegger <sroland@vmware.com>,
	Josef Bacik <josef@toxicpanda.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Linus Torvalds <torvalds@linuxfoundation.org>,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	spice-devel@lists.freedesktop.org,
	David Sterba <dsterba@suse.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	linux-arm-kernel@lists.infradead.org,
	Chris Zankel <chris@zankel.net>, Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Nick Hu <nickhu@andestech.com>,
	linux-mm@kvack.org, Vineet Gupta <vgupta@synopsys.com>,
	linux-mips@vger.kernel.org,
	Christian Koenig <christian.koenig@amd.com>,
	Benjamin LaHaise <bcrl@kvack.org>,
	linux-fsdevel@vger.kernel.org,
	Andrew Morton <akpm@linux-foundation.org>,
	linuxppc-dev@lists.ozlabs.org,
	"David S. Miller" <davem@davemloft.net>,
	linux-btrfs@vger.kernel.org, Greentime Hu <green.hu@gmail.com>
Subject: [patch V3 25/37] mm/highmem: Provide kmap_local*
Date: Tue, 03 Nov 2020 09:27:37 +0000	[thread overview]
Message-ID: <20201103095859.132846644@linutronix.de> (raw)
In-Reply-To: 20201103092712.714480842@linutronix.de

Now that the kmap atomic index is stored in task struct provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the map of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.

The kmap_local.*() functions can be invoked from both preemptible and
atomic context. kmap local sections disable migration to keep the resulting
virtual mapping address correct, but disable neither pagefaults nor
preemption.

A wholesale conversion of kmap_atomic to be fully preemptible is not
possible because some of the usage sites might rely on the preemption
disable for serialization or on the implicit pagefault disable. Needs to be
done on a case by case basis.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V3: Move migrate disable into the actual highmem mapping code so it only
    affects real highmem mappings.
   
V2: Make it more consistent and add commentry
---
 include/linux/highmem-internal.h |   48 +++++++++++++++++++++++++++++++++++++++
 include/linux/highmem.h          |   43 +++++++++++++++++++++-------------
 mm/highmem.c                     |    6 ++++
 3 files changed, 81 insertions(+), 16 deletions(-)

--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -69,6 +69,26 @@ static inline void kmap_flush_unused(voi
 	__kmap_flush_unused();
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(void *vaddr)
+{
+	kunmap_local_indexed(vaddr);
+}
+
 static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
@@ -141,6 +161,28 @@ static inline void kunmap(struct page *p
 #endif
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return page_address(page);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(addr);
+#endif
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -182,4 +224,10 @@ do {								\
 	__kunmap_atomic(__addr);				\
 } while (0)
 
+#define kunmap_local(__addr)					\
+do {								\
+	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
+	__kunmap_local(__addr);					\
+} while (0)
+
 #endif
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -60,24 +60,22 @@ static inline struct page *kmap_to_page(
 static inline void kmap_flush_unused(void);
 
 /**
- * kmap_atomic - Atomically map a page for temporary usage
+ * kmap_local_page - Map a page for temporary usage
  * @page:	Pointer to the page to be mapped
  *
  * Returns: The virtual address of the mapping
  *
- * Side effect: On return pagefaults and preemption are disabled.
- *
  * Can be invoked from any context.
  *
  * Requires careful handling when nesting multiple mappings because the map
  * management is stack based. The unmap has to be in the reverse order of
  * the map operation:
  *
- * addr1 = kmap_atomic(page1);
- * addr2 = kmap_atomic(page2);
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
  * ...
- * kunmap_atomic(addr2);
- * kunmap_atomic(addr1);
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
  *
  * Unmapping addr1 before addr2 is invalid and causes malfunction.
  *
@@ -88,10 +86,26 @@ static inline void kmap_flush_unused(voi
  * virtual address of the direct mapping. Only real highmem pages are
  * temporarily mapped.
  *
- * While it is significantly faster than kmap() it comes with restrictions
- * about the pointer validity and the side effects of disabling page faults
- * and preemption. Use it only when absolutely necessary, e.g. from non
- * preemptible contexts.
+ * While it is significantly faster than kmap() for the higmem case it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
+
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page:	Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Effectively a wrapper around kmap_local_page() which disables pagefaults
+ * and preemption.
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
  */
 static inline void *kmap_atomic(struct page *page);
 
@@ -101,12 +115,9 @@ static inline void *kmap_atomic(struct p
  *
  * Counterpart to kmap_atomic().
  *
- * Undoes the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * Effectively a wrapper around kunmap_local() which additionally undoes
+ * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
  * preemption.
- *
- * Other than that a NOOP for CONFIG_HIGHMEM=n and for mappings of pages
- * in the low memory area. For real highmen pages the mapping which was
- * established with kmap_atomic() is destroyed.
  */
 
 /* Highmem related interfaces for management code */
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -450,6 +450,11 @@ void *__kmap_local_pfn_prot(unsigned lon
 	unsigned long vaddr;
 	int idx;
 
+	/*
+	 * Disable migration so resulting virtual address is stable
+	 * accross preemption.
+	 */
+	migrate_disable();
 	preempt_disable();
 	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -505,6 +510,7 @@ void kunmap_local_indexed(void *vaddr)
 	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
 	kmap_local_idx_pop();
 	preempt_enable();
+	migrate_enable();
 }
 EXPORT_SYMBOL(kunmap_local_indexed);
 

WARNING: multiple messages have this Message-ID (diff)
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Juri Lelli <juri.lelli@redhat.com>,
	linux-aio@kvack.org, Peter Zijlstra <peterz@infradead.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
	dri-devel@lists.freedesktop.org,
	virtualization@lists.linux-foundation.org,
	Ben Segall <bsegall@google.com>, Chris Mason <clm@fb.com>,
	Huang Rui <ray.huang@amd.com>, Paul Mackerras <paulus@samba.org>,
	Gerd Hoffmann <kraxel@redhat.com>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	sparclinux@vger.kernel.org, Vincent Chen <deanbo422@gmail.com>,
	Christoph Hellwig <hch@lst.de>,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Paul McKenney <paulmck@kernel.org>,
	Max Filippov <jcmvbkbc@gmail.com>,
	x86@kernel.org, Russell King <linux@armlinux.org.uk>,
	linux-csky@vger.kernel.org, Ingo Molnar <mingo@kernel.org>,
	David Airlie <airlied@linux.ie>,
	VMware Graphics <linux-graphics-maintainer@vmware.com>,
	Mel Gorman <mgorman@suse.de>,
	nouveau@lists.freedesktop.org, Dave Airlie <airlied@redhat.com>,
	linux-snps-arc@lists.infradead.org,
	Ben Skeggs <bskeggs@redhat.com>,
	linux-xtensa@linux-xtensa.org, Arnd Bergmann <arnd@arndb.de>,
	intel-gfx@lists.freedesktop.org,
	Roland Scheidegger <sroland@vmware.com>,
	Josef Bacik <josef@toxicpanda.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Linus Torvalds <torvalds@linuxfoundation.org>,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	spice-devel@lists.freedesktop.org,
	David Sterba <dsterba@suse.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	linux-arm-kernel@lists.infradead.org,
	Jani Nikula <jani.nikula@linux.intel.com>,
	Chris Zankel <chris@zankel.net>, Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Nick Hu <nickhu@andestech.com>,
	linux-mm@kvack.org, Vineet Gupta <vgupta@synopsys.com>,
	linux-mips@vger.kernel.org,
	Christian Koenig <christian.koenig@amd.com>,
	Benjamin LaHaise <bcrl@kvack.org>,
	Daniel Vetter <daniel@ffwll.ch>,
	linux-fsdevel@vger.kernel.org,
	Andrew Morton <akpm@linux-foundation.org>,
	linuxppc-dev@lists.ozlabs.org,
	"David S. Miller" <davem@davemloft.net>,
	linux-btrfs@vger.kernel.org, Greentime Hu <green.hu@gmail.com>
Subject: [patch V3 25/37] mm/highmem: Provide kmap_local*
Date: Tue, 03 Nov 2020 10:27:37 +0100	[thread overview]
Message-ID: <20201103095859.132846644@linutronix.de> (raw)
In-Reply-To: 20201103092712.714480842@linutronix.de

Now that the kmap atomic index is stored in task struct provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the map of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.

The kmap_local.*() functions can be invoked from both preemptible and
atomic context. kmap local sections disable migration to keep the resulting
virtual mapping address correct, but disable neither pagefaults nor
preemption.

A wholesale conversion of kmap_atomic to be fully preemptible is not
possible because some of the usage sites might rely on the preemption
disable for serialization or on the implicit pagefault disable. Needs to be
done on a case by case basis.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V3: Move migrate disable into the actual highmem mapping code so it only
    affects real highmem mappings.
   
V2: Make it more consistent and add commentry
---
 include/linux/highmem-internal.h |   48 +++++++++++++++++++++++++++++++++++++++
 include/linux/highmem.h          |   43 +++++++++++++++++++++-------------
 mm/highmem.c                     |    6 ++++
 3 files changed, 81 insertions(+), 16 deletions(-)

--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -69,6 +69,26 @@ static inline void kmap_flush_unused(voi
 	__kmap_flush_unused();
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(void *vaddr)
+{
+	kunmap_local_indexed(vaddr);
+}
+
 static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
@@ -141,6 +161,28 @@ static inline void kunmap(struct page *p
 #endif
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return page_address(page);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(addr);
+#endif
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -182,4 +224,10 @@ do {								\
 	__kunmap_atomic(__addr);				\
 } while (0)
 
+#define kunmap_local(__addr)					\
+do {								\
+	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
+	__kunmap_local(__addr);					\
+} while (0)
+
 #endif
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -60,24 +60,22 @@ static inline struct page *kmap_to_page(
 static inline void kmap_flush_unused(void);
 
 /**
- * kmap_atomic - Atomically map a page for temporary usage
+ * kmap_local_page - Map a page for temporary usage
  * @page:	Pointer to the page to be mapped
  *
  * Returns: The virtual address of the mapping
  *
- * Side effect: On return pagefaults and preemption are disabled.
- *
  * Can be invoked from any context.
  *
  * Requires careful handling when nesting multiple mappings because the map
  * management is stack based. The unmap has to be in the reverse order of
  * the map operation:
  *
- * addr1 = kmap_atomic(page1);
- * addr2 = kmap_atomic(page2);
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
  * ...
- * kunmap_atomic(addr2);
- * kunmap_atomic(addr1);
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
  *
  * Unmapping addr1 before addr2 is invalid and causes malfunction.
  *
@@ -88,10 +86,26 @@ static inline void kmap_flush_unused(voi
  * virtual address of the direct mapping. Only real highmem pages are
  * temporarily mapped.
  *
- * While it is significantly faster than kmap() it comes with restrictions
- * about the pointer validity and the side effects of disabling page faults
- * and preemption. Use it only when absolutely necessary, e.g. from non
- * preemptible contexts.
+ * While it is significantly faster than kmap() for the higmem case it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
+
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page:	Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Effectively a wrapper around kmap_local_page() which disables pagefaults
+ * and preemption.
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
  */
 static inline void *kmap_atomic(struct page *page);
 
@@ -101,12 +115,9 @@ static inline void *kmap_atomic(struct p
  *
  * Counterpart to kmap_atomic().
  *
- * Undoes the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * Effectively a wrapper around kunmap_local() which additionally undoes
+ * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
  * preemption.
- *
- * Other than that a NOOP for CONFIG_HIGHMEM=n and for mappings of pages
- * in the low memory area. For real highmen pages the mapping which was
- * established with kmap_atomic() is destroyed.
  */
 
 /* Highmem related interfaces for management code */
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -450,6 +450,11 @@ void *__kmap_local_pfn_prot(unsigned lon
 	unsigned long vaddr;
 	int idx;
 
+	/*
+	 * Disable migration so resulting virtual address is stable
+	 * accross preemption.
+	 */
+	migrate_disable();
 	preempt_disable();
 	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -505,6 +510,7 @@ void kunmap_local_indexed(void *vaddr)
 	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
 	kmap_local_idx_pop();
 	preempt_enable();
+	migrate_enable();
 }
 EXPORT_SYMBOL(kunmap_local_indexed);
 


WARNING: multiple messages have this Message-ID (diff)
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Juri Lelli <juri.lelli@redhat.com>,
	linux-aio@kvack.org, Peter Zijlstra <peterz@infradead.org>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
	dri-devel@lists.freedesktop.org,
	virtualization@lists.linux-foundation.org,
	Ben Segall <bsegall@google.com>, Chris Mason <clm@fb.com>,
	Huang Rui <ray.huang@amd.com>, Paul Mackerras <paulus@samba.org>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	sparclinux@vger.kernel.org, Vincent Chen <deanbo422@gmail.com>,
	Christoph Hellwig <hch@lst.de>,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Paul McKenney <paulmck@kernel.org>,
	Max Filippov <jcmvbkbc@gmail.com>,
	Michael Ellerman <mpe@ellerman.id.au>,
	x86@kernel.org, Russell King <linux@armlinux.org.uk>,
	linux-csky@vger.kernel.org, Ingo Molnar <mingo@kernel.org>,
	David Airlie <air>
Subject: [patch V3 25/37] mm/highmem: Provide kmap_local*
Date: Tue, 03 Nov 2020 10:27:37 +0100	[thread overview]
Message-ID: <20201103095859.132846644@linutronix.de> (raw)
In-Reply-To: 20201103092712.714480842@linutronix.de

Now that the kmap atomic index is stored in task struct provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the map of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.

The kmap_local.*() functions can be invoked from both preemptible and
atomic context. kmap local sections disable migration to keep the resulting
virtual mapping address correct, but disable neither pagefaults nor
preemption.

A wholesale conversion of kmap_atomic to be fully preemptible is not
possible because some of the usage sites might rely on the preemption
disable for serialization or on the implicit pagefault disable. Needs to be
done on a case by case basis.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V3: Move migrate disable into the actual highmem mapping code so it only
    affects real highmem mappings.
   
V2: Make it more consistent and add commentry
---
 include/linux/highmem-internal.h |   48 +++++++++++++++++++++++++++++++++++++++
 include/linux/highmem.h          |   43 +++++++++++++++++++++-------------
 mm/highmem.c                     |    6 ++++
 3 files changed, 81 insertions(+), 16 deletions(-)

--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -69,6 +69,26 @@ static inline void kmap_flush_unused(voi
 	__kmap_flush_unused();
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(void *vaddr)
+{
+	kunmap_local_indexed(vaddr);
+}
+
 static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
@@ -141,6 +161,28 @@ static inline void kunmap(struct page *p
 #endif
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return page_address(page);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(addr);
+#endif
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -182,4 +224,10 @@ do {								\
 	__kunmap_atomic(__addr);				\
 } while (0)
 
+#define kunmap_local(__addr)					\
+do {								\
+	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
+	__kunmap_local(__addr);					\
+} while (0)
+
 #endif
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -60,24 +60,22 @@ static inline struct page *kmap_to_page(
 static inline void kmap_flush_unused(void);
 
 /**
- * kmap_atomic - Atomically map a page for temporary usage
+ * kmap_local_page - Map a page for temporary usage
  * @page:	Pointer to the page to be mapped
  *
  * Returns: The virtual address of the mapping
  *
- * Side effect: On return pagefaults and preemption are disabled.
- *
  * Can be invoked from any context.
  *
  * Requires careful handling when nesting multiple mappings because the map
  * management is stack based. The unmap has to be in the reverse order of
  * the map operation:
  *
- * addr1 = kmap_atomic(page1);
- * addr2 = kmap_atomic(page2);
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
  * ...
- * kunmap_atomic(addr2);
- * kunmap_atomic(addr1);
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
  *
  * Unmapping addr1 before addr2 is invalid and causes malfunction.
  *
@@ -88,10 +86,26 @@ static inline void kmap_flush_unused(voi
  * virtual address of the direct mapping. Only real highmem pages are
  * temporarily mapped.
  *
- * While it is significantly faster than kmap() it comes with restrictions
- * about the pointer validity and the side effects of disabling page faults
- * and preemption. Use it only when absolutely necessary, e.g. from non
- * preemptible contexts.
+ * While it is significantly faster than kmap() for the higmem case it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
+
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page:	Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Effectively a wrapper around kmap_local_page() which disables pagefaults
+ * and preemption.
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
  */
 static inline void *kmap_atomic(struct page *page);
 
@@ -101,12 +115,9 @@ static inline void *kmap_atomic(struct p
  *
  * Counterpart to kmap_atomic().
  *
- * Undoes the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * Effectively a wrapper around kunmap_local() which additionally undoes
+ * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
  * preemption.
- *
- * Other than that a NOOP for CONFIG_HIGHMEM=n and for mappings of pages
- * in the low memory area. For real highmen pages the mapping which was
- * established with kmap_atomic() is destroyed.
  */
 
 /* Highmem related interfaces for management code */
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -450,6 +450,11 @@ void *__kmap_local_pfn_prot(unsigned lon
 	unsigned long vaddr;
 	int idx;
 
+	/*
+	 * Disable migration so resulting virtual address is stable
+	 * accross preemption.
+	 */
+	migrate_disable();
 	preempt_disable();
 	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -505,6 +510,7 @@ void kunmap_local_indexed(void *vaddr)
 	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
 	kmap_local_idx_pop();
 	preempt_enable();
+	migrate_enable();
 }
 EXPORT_SYMBOL(kunmap_local_indexed);

WARNING: multiple messages have this Message-ID (diff)
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Juri Lelli <juri.lelli@redhat.com>,
	linux-aio@kvack.org, Peter Zijlstra <peterz@infradead.org>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
	dri-devel@lists.freedesktop.org,
	virtualization@lists.linux-foundation.org,
	Ben Segall <bsegall@google.com>, Chris Mason <clm@fb.com>,
	Huang Rui <ray.huang@amd.com>, Paul Mackerras <paulus@samba.org>,
	Gerd Hoffmann <kraxel@redhat.com>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	sparclinux@vger.kernel.org, Vincent Chen <deanbo422@gmail.com>,
	Christoph Hellwig <hch@lst.de>,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Paul McKenney <paulmck@kernel.org>,
	Max Filippov <jcmvbkbc@gmail.com>,
	Michael Ellerman <mpe@ellerman.id.au>,
	x86@kernel.org, Russell King <linux@armlinux.org.uk>,
	linux-csky@vger.kernel.org, Ingo Molnar <mingo@kernel.org>,
	David Airlie <airlied@linux.ie>,
	VMware Graphics <linux-graphics-maintainer@vmware.com>,
	Mel Gorman <mgorman@suse.de>,
	nouveau@lists.freedesktop.org, Dave Airlie <airlied@redhat.com>,
	linux-snps-arc@lists.infradead.org,
	Ben Skeggs <bskeggs@redhat.com>,
	linux-xtensa@linux-xtensa.org, Arnd Bergmann <arnd@arndb.de>,
	intel-gfx@lists.freedesktop.org,
	Roland Scheidegger <sroland@vmware.com>,
	Josef Bacik <josef@toxicpanda.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Linus Torvalds <torvalds@linuxfoundation.org>,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	spice-devel@lists.freedesktop.org,
	David Sterba <dsterba@suse.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	linux-arm-kernel@lists.infradead.org,
	Jani Nikula <jani.nikula@linux.intel.com>,
	Chris Zankel <chris@zankel.net>, Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Nick Hu <nickhu@andestech.com>,
	linux-mm@kvack.org, Vineet Gupta <vgupta@synopsys.com>,
	linux-mips@vger.kernel.org,
	Christian Koenig <christian.koenig@amd.com>,
	Benjamin LaHaise <bcrl@kvack.org>,
	Daniel Vetter <daniel@ffwll.ch>,
	linux-fsdevel@vger.kernel.org,
	Andrew Morton <akpm@linux-foundation.org>,
	linuxppc-dev@lists.ozlabs.org,
	"David S. Miller" <davem@davemloft.net>,
	linux-btrfs@vger.kernel.org, Greentime Hu <green.hu@gmail.com>
Subject: [patch V3 25/37] mm/highmem: Provide kmap_local*
Date: Tue, 03 Nov 2020 10:27:37 +0100	[thread overview]
Message-ID: <20201103095859.132846644@linutronix.de> (raw)
In-Reply-To: 20201103092712.714480842@linutronix.de

Now that the kmap atomic index is stored in task struct provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the map of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.

The kmap_local.*() functions can be invoked from both preemptible and
atomic context. kmap local sections disable migration to keep the resulting
virtual mapping address correct, but disable neither pagefaults nor
preemption.

A wholesale conversion of kmap_atomic to be fully preemptible is not
possible because some of the usage sites might rely on the preemption
disable for serialization or on the implicit pagefault disable. Needs to be
done on a case by case basis.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V3: Move migrate disable into the actual highmem mapping code so it only
    affects real highmem mappings.
   
V2: Make it more consistent and add commentry
---
 include/linux/highmem-internal.h |   48 +++++++++++++++++++++++++++++++++++++++
 include/linux/highmem.h          |   43 +++++++++++++++++++++-------------
 mm/highmem.c                     |    6 ++++
 3 files changed, 81 insertions(+), 16 deletions(-)

--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -69,6 +69,26 @@ static inline void kmap_flush_unused(voi
 	__kmap_flush_unused();
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(void *vaddr)
+{
+	kunmap_local_indexed(vaddr);
+}
+
 static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
@@ -141,6 +161,28 @@ static inline void kunmap(struct page *p
 #endif
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return page_address(page);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(addr);
+#endif
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -182,4 +224,10 @@ do {								\
 	__kunmap_atomic(__addr);				\
 } while (0)
 
+#define kunmap_local(__addr)					\
+do {								\
+	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
+	__kunmap_local(__addr);					\
+} while (0)
+
 #endif
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -60,24 +60,22 @@ static inline struct page *kmap_to_page(
 static inline void kmap_flush_unused(void);
 
 /**
- * kmap_atomic - Atomically map a page for temporary usage
+ * kmap_local_page - Map a page for temporary usage
  * @page:	Pointer to the page to be mapped
  *
  * Returns: The virtual address of the mapping
  *
- * Side effect: On return pagefaults and preemption are disabled.
- *
  * Can be invoked from any context.
  *
  * Requires careful handling when nesting multiple mappings because the map
  * management is stack based. The unmap has to be in the reverse order of
  * the map operation:
  *
- * addr1 = kmap_atomic(page1);
- * addr2 = kmap_atomic(page2);
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
  * ...
- * kunmap_atomic(addr2);
- * kunmap_atomic(addr1);
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
  *
  * Unmapping addr1 before addr2 is invalid and causes malfunction.
  *
@@ -88,10 +86,26 @@ static inline void kmap_flush_unused(voi
  * virtual address of the direct mapping. Only real highmem pages are
  * temporarily mapped.
  *
- * While it is significantly faster than kmap() it comes with restrictions
- * about the pointer validity and the side effects of disabling page faults
- * and preemption. Use it only when absolutely necessary, e.g. from non
- * preemptible contexts.
+ * While it is significantly faster than kmap() for the higmem case it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
+
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page:	Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Effectively a wrapper around kmap_local_page() which disables pagefaults
+ * and preemption.
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
  */
 static inline void *kmap_atomic(struct page *page);
 
@@ -101,12 +115,9 @@ static inline void *kmap_atomic(struct p
  *
  * Counterpart to kmap_atomic().
  *
- * Undoes the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * Effectively a wrapper around kunmap_local() which additionally undoes
+ * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
  * preemption.
- *
- * Other than that a NOOP for CONFIG_HIGHMEM=n and for mappings of pages
- * in the low memory area. For real highmen pages the mapping which was
- * established with kmap_atomic() is destroyed.
  */
 
 /* Highmem related interfaces for management code */
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -450,6 +450,11 @@ void *__kmap_local_pfn_prot(unsigned lon
 	unsigned long vaddr;
 	int idx;
 
+	/*
+	 * Disable migration so resulting virtual address is stable
+	 * accross preemption.
+	 */
+	migrate_disable();
 	preempt_disable();
 	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -505,6 +510,7 @@ void kunmap_local_indexed(void *vaddr)
 	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
 	kmap_local_idx_pop();
 	preempt_enable();
+	migrate_enable();
 }
 EXPORT_SYMBOL(kunmap_local_indexed);
 


_______________________________________________
linux-snps-arc mailing list
linux-snps-arc@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-snps-arc

WARNING: multiple messages have this Message-ID (diff)
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Juri Lelli <juri.lelli@redhat.com>,
	linux-aio@kvack.org, Peter Zijlstra <peterz@infradead.org>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
	dri-devel@lists.freedesktop.org,
	virtualization@lists.linux-foundation.org,
	Ben Segall <bsegall@google.com>, Chris Mason <clm@fb.com>,
	Huang Rui <ray.huang@amd.com>, Paul Mackerras <paulus@samba.org>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	sparclinux@vger.kernel.org, Vincent Chen <deanbo422@gmail.com>,
	Christoph Hellwig <hch@lst.de>,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Paul McKenney <paulmck@kernel.org>,
	Max Filippov <jcmvbkbc@gmail.com>,
	Michael Ellerman <mpe@ellerman.id.au>,
	x86@kernel.org, Russell King <linux@armlinux.org.uk>,
	linux-csky@vger.kernel.org, Ingo Molnar <mingo@kernel.org>,
	David Airlie <airlied@linux.ie>,
	VMware Graphics <linux-graphics-maintainer@vmware.com>,
	Mel Gorman <mgorman@suse.de>,
	nouveau@lists.freedesktop.org, Dave Airlie <airlied@redhat.com>,
	linux-snps-arc@lists.infradead.org,
	Ben Skeggs <bskeggs@redhat.com>,
	linux-xtensa@linux-xtensa.org, Arnd Bergmann <arnd@arndb.de>,
	intel-gfx@lists.freedesktop.org,
	Roland Scheidegger <sroland@vmware.com>,
	Josef Bacik <josef@toxicpanda.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Linus Torvalds <torvalds@linuxfoundation.org>,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	spice-devel@lists.freedesktop.org,
	David Sterba <dsterba@suse.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	linux-arm-kernel@lists.infradead.org,
	Jani Nikula <jani.nikula@linux.intel.com>,
	Chris Zankel <chris@zankel.net>, Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Nick Hu <nickhu@andestech.com>,
	linux-mm@kvack.org, Vineet Gupta <vgupta@synopsys.com>,
	linux-mips@vger.kernel.org,
	Christian Koenig <christian.koenig@amd.com>,
	Benjamin LaHaise <bcrl@kvack.org>,
	Daniel Vetter <daniel@ffwll.ch>,
	linux-fsdevel@vger.kernel.org,
	Andrew Morton <akpm@linux-foundation.org>,
	linuxppc-dev@lists.ozlabs.org,
	"David S. Miller" <davem@davemloft.net>,
	linux-btrfs@vger.kernel.org, Greentime Hu <green.hu@gmail.com>
Subject: [patch V3 25/37] mm/highmem: Provide kmap_local*
Date: Tue, 03 Nov 2020 10:27:37 +0100	[thread overview]
Message-ID: <20201103095859.132846644@linutronix.de> (raw)
In-Reply-To: 20201103092712.714480842@linutronix.de

Now that the kmap atomic index is stored in task struct provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the map of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.

The kmap_local.*() functions can be invoked from both preemptible and
atomic context. kmap local sections disable migration to keep the resulting
virtual mapping address correct, but disable neither pagefaults nor
preemption.

A wholesale conversion of kmap_atomic to be fully preemptible is not
possible because some of the usage sites might rely on the preemption
disable for serialization or on the implicit pagefault disable. Needs to be
done on a case by case basis.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V3: Move migrate disable into the actual highmem mapping code so it only
    affects real highmem mappings.
   
V2: Make it more consistent and add commentry
---
 include/linux/highmem-internal.h |   48 +++++++++++++++++++++++++++++++++++++++
 include/linux/highmem.h          |   43 +++++++++++++++++++++-------------
 mm/highmem.c                     |    6 ++++
 3 files changed, 81 insertions(+), 16 deletions(-)

--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -69,6 +69,26 @@ static inline void kmap_flush_unused(voi
 	__kmap_flush_unused();
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(void *vaddr)
+{
+	kunmap_local_indexed(vaddr);
+}
+
 static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
@@ -141,6 +161,28 @@ static inline void kunmap(struct page *p
 #endif
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return page_address(page);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(addr);
+#endif
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -182,4 +224,10 @@ do {								\
 	__kunmap_atomic(__addr);				\
 } while (0)
 
+#define kunmap_local(__addr)					\
+do {								\
+	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
+	__kunmap_local(__addr);					\
+} while (0)
+
 #endif
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -60,24 +60,22 @@ static inline struct page *kmap_to_page(
 static inline void kmap_flush_unused(void);
 
 /**
- * kmap_atomic - Atomically map a page for temporary usage
+ * kmap_local_page - Map a page for temporary usage
  * @page:	Pointer to the page to be mapped
  *
  * Returns: The virtual address of the mapping
  *
- * Side effect: On return pagefaults and preemption are disabled.
- *
  * Can be invoked from any context.
  *
  * Requires careful handling when nesting multiple mappings because the map
  * management is stack based. The unmap has to be in the reverse order of
  * the map operation:
  *
- * addr1 = kmap_atomic(page1);
- * addr2 = kmap_atomic(page2);
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
  * ...
- * kunmap_atomic(addr2);
- * kunmap_atomic(addr1);
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
  *
  * Unmapping addr1 before addr2 is invalid and causes malfunction.
  *
@@ -88,10 +86,26 @@ static inline void kmap_flush_unused(voi
  * virtual address of the direct mapping. Only real highmem pages are
  * temporarily mapped.
  *
- * While it is significantly faster than kmap() it comes with restrictions
- * about the pointer validity and the side effects of disabling page faults
- * and preemption. Use it only when absolutely necessary, e.g. from non
- * preemptible contexts.
+ * While it is significantly faster than kmap() for the higmem case it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
+
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page:	Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Effectively a wrapper around kmap_local_page() which disables pagefaults
+ * and preemption.
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
  */
 static inline void *kmap_atomic(struct page *page);
 
@@ -101,12 +115,9 @@ static inline void *kmap_atomic(struct p
  *
  * Counterpart to kmap_atomic().
  *
- * Undoes the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * Effectively a wrapper around kunmap_local() which additionally undoes
+ * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
  * preemption.
- *
- * Other than that a NOOP for CONFIG_HIGHMEM=n and for mappings of pages
- * in the low memory area. For real highmen pages the mapping which was
- * established with kmap_atomic() is destroyed.
  */
 
 /* Highmem related interfaces for management code */
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -450,6 +450,11 @@ void *__kmap_local_pfn_prot(unsigned lon
 	unsigned long vaddr;
 	int idx;
 
+	/*
+	 * Disable migration so resulting virtual address is stable
+	 * accross preemption.
+	 */
+	migrate_disable();
 	preempt_disable();
 	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -505,6 +510,7 @@ void kunmap_local_indexed(void *vaddr)
 	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
 	kmap_local_idx_pop();
 	preempt_enable();
+	migrate_enable();
 }
 EXPORT_SYMBOL(kunmap_local_indexed);
 

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

WARNING: multiple messages have this Message-ID (diff)
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Juri Lelli <juri.lelli@redhat.com>,
	linux-aio@kvack.org, Peter Zijlstra <peterz@infradead.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	dri-devel@lists.freedesktop.org,
	virtualization@lists.linux-foundation.org,
	Ben Segall <bsegall@google.com>, Chris Mason <clm@fb.com>,
	Huang Rui <ray.huang@amd.com>, Paul Mackerras <paulus@samba.org>,
	Gerd Hoffmann <kraxel@redhat.com>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	sparclinux@vger.kernel.org, Vincent Chen <deanbo422@gmail.com>,
	Christoph Hellwig <hch@lst.de>,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Paul McKenney <paulmck@kernel.org>,
	Max Filippov <jcmvbkbc@gmail.com>,
	Michael Ellerman <mpe@ellerman.id.au>,
	x86@kernel.org, Russell King <linux@armlinux.org.uk>,
	linux-csky@vger.kernel.org, Ingo Molnar <mingo@kernel.org>,
	David Airlie <airlied@linux.ie>,
	VMware Graphics <linux-graphics-maintainer@vmware.com>,
	Mel Gorman <mgorman@suse.de>,
	nouveau@lists.freedesktop.org, Dave Airlie <airlied@redhat.com>,
	linux-snps-arc@lists.infradead.org,
	Ben Skeggs <bskeggs@redhat.com>,
	linux-xtensa@linux-xtensa.org, Arnd Bergmann <arnd@arndb.de>,
	intel-gfx@lists.freedesktop.org,
	Roland Scheidegger <sroland@vmware.com>,
	Josef Bacik <josef@toxicpanda.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Linus Torvalds <torvalds@linuxfoundation.org>,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	spice-devel@lists.freedesktop.org,
	David Sterba <dsterba@suse.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	linux-arm-kernel@lists.infradead.org,
	Chris Zankel <chris@zankel.net>, Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Nick Hu <nickhu@andestech.com>,
	linux-mm@kvack.org, Vineet Gupta <vgupta@synopsys.com>,
	linux-mips@vger.kernel.org,
	Christian Koenig <christian.koenig@amd.com>,
	Benjamin LaHaise <bcrl@kvack.org>,
	linux-fsdevel@vger.kernel.org,
	Andrew Morton <akpm@linux-foundation.org>,
	linuxppc-dev@lists.ozlabs.org,
	"David S. Miller" <davem@davemloft.net>,
	linux-btrfs@vger.kernel.org, Greentime Hu <green.hu@gmail.com>
Subject: [patch V3 25/37] mm/highmem: Provide kmap_local*
Date: Tue, 03 Nov 2020 10:27:37 +0100	[thread overview]
Message-ID: <20201103095859.132846644@linutronix.de> (raw)
In-Reply-To: 20201103092712.714480842@linutronix.de

Now that the kmap atomic index is stored in task struct provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the map of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.

The kmap_local.*() functions can be invoked from both preemptible and
atomic context. kmap local sections disable migration to keep the resulting
virtual mapping address correct, but disable neither pagefaults nor
preemption.

A wholesale conversion of kmap_atomic to be fully preemptible is not
possible because some of the usage sites might rely on the preemption
disable for serialization or on the implicit pagefault disable. Needs to be
done on a case by case basis.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V3: Move migrate disable into the actual highmem mapping code so it only
    affects real highmem mappings.
   
V2: Make it more consistent and add commentry
---
 include/linux/highmem-internal.h |   48 +++++++++++++++++++++++++++++++++++++++
 include/linux/highmem.h          |   43 +++++++++++++++++++++-------------
 mm/highmem.c                     |    6 ++++
 3 files changed, 81 insertions(+), 16 deletions(-)

--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -69,6 +69,26 @@ static inline void kmap_flush_unused(voi
 	__kmap_flush_unused();
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(void *vaddr)
+{
+	kunmap_local_indexed(vaddr);
+}
+
 static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
@@ -141,6 +161,28 @@ static inline void kunmap(struct page *p
 #endif
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return page_address(page);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(addr);
+#endif
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -182,4 +224,10 @@ do {								\
 	__kunmap_atomic(__addr);				\
 } while (0)
 
+#define kunmap_local(__addr)					\
+do {								\
+	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
+	__kunmap_local(__addr);					\
+} while (0)
+
 #endif
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -60,24 +60,22 @@ static inline struct page *kmap_to_page(
 static inline void kmap_flush_unused(void);
 
 /**
- * kmap_atomic - Atomically map a page for temporary usage
+ * kmap_local_page - Map a page for temporary usage
  * @page:	Pointer to the page to be mapped
  *
  * Returns: The virtual address of the mapping
  *
- * Side effect: On return pagefaults and preemption are disabled.
- *
  * Can be invoked from any context.
  *
  * Requires careful handling when nesting multiple mappings because the map
  * management is stack based. The unmap has to be in the reverse order of
  * the map operation:
  *
- * addr1 = kmap_atomic(page1);
- * addr2 = kmap_atomic(page2);
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
  * ...
- * kunmap_atomic(addr2);
- * kunmap_atomic(addr1);
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
  *
  * Unmapping addr1 before addr2 is invalid and causes malfunction.
  *
@@ -88,10 +86,26 @@ static inline void kmap_flush_unused(voi
  * virtual address of the direct mapping. Only real highmem pages are
  * temporarily mapped.
  *
- * While it is significantly faster than kmap() it comes with restrictions
- * about the pointer validity and the side effects of disabling page faults
- * and preemption. Use it only when absolutely necessary, e.g. from non
- * preemptible contexts.
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
+
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page:	Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Effectively a wrapper around kmap_local_page() which disables pagefaults
+ * and preemption.
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
  */
 static inline void *kmap_atomic(struct page *page);
 
@@ -101,12 +115,9 @@ static inline void *kmap_atomic(struct p
  *
  * Counterpart to kmap_atomic().
  *
- * Undoes the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * Effectively a wrapper around kunmap_local() which additionally undoes
+ * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
  * preemption.
- *
- * Other than that a NOOP for CONFIG_HIGHMEM=n and for mappings of pages
- * in the low memory area. For real highmen pages the mapping which was
- * established with kmap_atomic() is destroyed.
  */
 
 /* Highmem related interfaces for management code */
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -450,6 +450,11 @@ void *__kmap_local_pfn_prot(unsigned lon
 	unsigned long vaddr;
 	int idx;
 
+	/*
+	 * Disable migration so the resulting virtual address is stable
+	 * across preemption.
+	 */
+	migrate_disable();
 	preempt_disable();
 	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -505,6 +510,7 @@ void kunmap_local_indexed(void *vaddr)
 	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
 	kmap_local_idx_pop();
 	preempt_enable();
+	migrate_enable();
 }
 EXPORT_SYMBOL(kunmap_local_indexed);
 

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

  parent reply	other threads:[~2020-11-03 10:35 UTC|newest]

Thread overview: 401+ messages
2020-11-03  9:27 [patch V3 00/37] mm/highmem: Preemptible variant of kmap_atomic & friends Thomas Gleixner
2020-11-03  9:27 ` [patch V3 01/37] mm/highmem: Un-EXPORT __kmap_atomic_idx() Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 02/37] highmem: Remove unused functions Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 03/37] fs: Remove asm/kmap_types.h includes Thomas Gleixner
2020-11-03 11:12   ` David Sterba
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 04/37] sh/highmem: Remove all traces of unused cruft Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 05/37] asm-generic: Provide kmap_size.h Thomas Gleixner
2020-11-03 12:25   ` Arnd Bergmann
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 06/37] highmem: Provide generic variant of kmap_atomic* Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 07/37] highmem: Make DEBUG_HIGHMEM functional Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 08/37] x86/mm/highmem: Use generic kmap atomic implementation Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 09/37] arc/mm/highmem: " Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 10/37] ARM: highmem: Switch to generic kmap atomic Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
     [not found]   ` <CGME20201112081036eucas1p14e135a370d3bccab311727fd2e89f4df@eucas1p1.samsung.com>
2020-11-12  8:10     ` [patch V3 10/37] " Marek Szyprowski
2020-11-12 11:03       ` Thomas Gleixner
2020-11-12 11:07       ` Sebastian Andrzej Siewior
2020-11-03  9:27 ` [patch V3 11/37] csky/mm/highmem: " Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 12/37] microblaze/mm/highmem: " Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 13/37] mips/mm/highmem: " Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 14/37] nds32/mm/highmem: " Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 15/37] powerpc/mm/highmem: " Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 16/37] sparc/mm/highmem: " Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 17/37] xtensa/mm/highmem: " Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 18/37] highmem: Get rid of kmap_types.h Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 19/37] mm/highmem: Remove the old kmap_atomic cruft Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 20/37] io-mapping: Cleanup atomic iomap Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 21/37] Documentation/io-mapping: Remove outdated blurb Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 22/37] highmem: High implementation details and document API Thomas Gleixner
2020-11-03 17:48   ` Linus Torvalds
2020-11-03 19:00     ` Thomas Gleixner
2020-11-06 23:27   ` [tip: core/mm] " tip-bot2 for Thomas Gleixner
2020-11-03  9:27 ` [patch V3 23/37] sched: Make migrate_disable/enable() independent of RT Thomas Gleixner
2020-11-03  9:27 ` [patch V3 24/37] sched: highmem: Store local kmaps in task struct Thomas Gleixner
2020-11-03 13:49   ` Thomas Gleixner
2020-11-03 13:51   ` [patch V4 " Thomas Gleixner
2020-11-03  9:27 ` Thomas Gleixner [this message]
2020-11-03  9:27 ` [patch V3 26/37] io-mapping: Provide iomap_local variant Thomas Gleixner
2020-11-03  9:27 ` [patch V3 27/37] x86/crashdump/32: Simplify copy_oldmem_page() Thomas Gleixner
2020-11-03  9:27 ` [patch V3 28/37] mips/crashdump: " Thomas Gleixner
2020-11-03  9:27 ` [patch V3 29/37] ARM: mm: Replace kmap_atomic_pfn() Thomas Gleixner
2020-11-03  9:27 ` [patch V3 30/37] highmem: Remove kmap_atomic_pfn() Thomas Gleixner
2020-11-03  9:27 ` [patch V3 31/37] drm/ttm: Replace kmap_atomic() usage Thomas Gleixner
2020-11-03  9:27 ` [patch V3 32/37] drm/vmgfx: Replace kmap_atomic() Thomas Gleixner
2020-11-03  9:27 ` [patch V3 33/37] highmem: Remove kmap_atomic_prot() Thomas Gleixner
2020-11-03  9:27 ` [patch V3 34/37] drm/qxl: Replace io_mapping_map_atomic_wc() Thomas Gleixner
2020-11-03  9:27 ` [patch V3 35/37] drm/nouveau/device: " Thomas Gleixner
2020-11-03  9:27 ` [patch V3 36/37] drm/i915: " Thomas Gleixner
2020-11-03  9:27 ` [patch V3 37/37] io-mapping: Remove io_mapping_map_atomic_wc() Thomas Gleixner
2020-11-03 13:33 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for mm/highmem: Preemptible variant of kmap_atomic & friends Patchwork
2020-11-03 13:57 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for mm/highmem: Preemptible variant of kmap_atomic & friends (rev2) Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20201103095859.132846644@linutronix.de \
    --to=tglx@linutronix.de \
    --cc=airlied@linux.ie \
    --cc=airlied@redhat.com \
    --cc=akpm@linux-foundation.org \
    --cc=arnd@arndb.de \
    --cc=bcrl@kvack.org \
    --cc=benh@kernel.crashing.org \
    --cc=bigeasy@linutronix.de \
    --cc=bristot@redhat.com \
    --cc=bsegall@google.com \
    --cc=bskeggs@redhat.com \
    --cc=chris@zankel.net \
    --cc=christian.koenig@amd.com \
    --cc=clm@fb.com \
    --cc=daniel@ffwll.ch \
    --cc=davem@davemloft.net \
    --cc=deanbo422@gmail.com \
    --cc=dietmar.eggemann@arm.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=dsterba@suse.com \
    --cc=green.hu@gmail.com \
    --cc=hch@lst.de \
    --cc=intel-gfx@lists.freedesktop.org \
    --cc=jani.nikula@linux.intel.com \
    --cc=jcmvbkbc@gmail.com \
    --cc=joonas.lahtinen@linux.intel.com \
    --cc=josef@toxicpanda.com \
    --cc=juri.lelli@redhat.com \
    --cc=kraxel@redhat.com \
    --cc=linux-aio@kvack.org \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-btrfs@vger.kernel.org \
    --cc=linux-csky@vger.kernel.org \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-graphics-maintainer@vmware.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mips@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux-snps-arc@lists.infradead.org \
    --cc=linux-xtensa@linux-xtensa.org \
    --cc=linux@armlinux.org.uk \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=mgorman@suse.de \
    --cc=mingo@kernel.org \
    --cc=monstr@monstr.eu \
    --cc=mpe@ellerman.id.au \
    --cc=nickhu@andestech.com \
    --cc=nouveau@lists.freedesktop.org \
    --cc=paulmck@kernel.org \
    --cc=paulus@samba.org \
    --cc=peterz@infradead.org \
    --cc=ray.huang@amd.com \
    --cc=rodrigo.vivi@intel.com \
    --cc=rostedt@goodmis.org \
    --cc=sparclinux@vger.kernel.org \
    --cc=spice-devel@lists.freedesktop.org \
    --cc=sroland@vmware.com \
    --cc=torvalds@linuxfoundation.org \
    --cc=tsbogend@alpha.franken.de \
    --cc=vgupta@synopsys.com \
    --cc=vincent.guittot@linaro.org \
    --cc=viro@zeniv.linux.org.uk \
    --cc=virtualization@lists.linux-foundation.org \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.