linux-kernel.vger.kernel.org archive mirror
* [RFC PATCH 1/2] mm/mmu_gather: Invalidate TLB correctly on batch allocation failure and flush
@ 2019-12-17  7:16 Aneesh Kumar K.V
  2019-12-17  7:16 ` [RFC PATCH 2/2] mm/mmu_gather: Avoid multiple page walk cache flush Aneesh Kumar K.V
  0 siblings, 1 reply; 6+ messages in thread
From: Aneesh Kumar K.V @ 2019-12-17  7:16 UTC (permalink / raw)
  To: akpm, npiggin, mpe; +Cc: linux-mm, linux-kernel, linuxppc-dev, Aneesh Kumar K.V

Architectures that have hardware walkers of the Linux page tables should
flush the TLB on mmu gather batch allocation failures and on batch flush.
Some architectures like POWER support multiple translation modes (hash
and radix), and in the case of POWER only the radix translation mode
needs the above TLBI. For hash translation mode the kernel wants to
avoid this extra flush, since there are no hardware walkers of the Linux
page tables. With radix translation the hardware also walks the Linux
page tables, so the kernel needs to invalidate the TLB page walk cache
before page-table pages are freed.
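
[ Illustration only, not part of the patch: with the hook introduced
  below, an architecture whose hardware walks the Linux page tables
  only in some modes would add something like this to its asm/tlb.h.
  The helper hw_walks_linux_pgtables() is made up; powerpc uses
  radix_enabled() and sparc64 simply returns false. ]

	#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	/*
	 * hw_walks_linux_pgtables() is a placeholder for whatever
	 * arch-specific check tells us the hardware walks the Linux
	 * page tables in the currently active translation mode.
	 */
	#define tlb_needs_table_invalidate()	hw_walks_linux_pgtables()
	#endif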

More details in
commit: d86564a2f085 ("mm/tlb, x86/mm: Support invalidating TLB caches for RCU_TABLE_FREE")

Based on changes from Peter Zijlstra <peterz@infradead.org>

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 arch/Kconfig                    |  3 ---
 arch/powerpc/Kconfig            |  1 -
 arch/powerpc/include/asm/tlb.h  |  4 ++++
 arch/sparc/Kconfig              |  1 -
 arch/sparc/include/asm/tlb_64.h |  9 +++++++++
 include/asm-generic/tlb.h       | 22 +++++++++++++++-------
 mm/mmu_gather.c                 | 16 ++++++++--------
 7 files changed, 36 insertions(+), 20 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index 48b5e103bdb0..208aad121630 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -396,9 +396,6 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE
 config HAVE_RCU_TABLE_FREE
 	bool
 
-config HAVE_RCU_TABLE_NO_INVALIDATE
-	bool
-
 config HAVE_MMU_GATHER_PAGE_SIZE
 	bool
 
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1ec34e16ed65..a15f5584b0de 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -223,7 +223,6 @@ config PPC
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_RCU_TABLE_FREE		if SMP
-	select HAVE_RCU_TABLE_NO_INVALIDATE	if HAVE_RCU_TABLE_FREE
 	select HAVE_MMU_GATHER_PAGE_SIZE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE		if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index b2c0be93929d..feea1a09bbce 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -27,6 +27,10 @@
 #define tlb_flush tlb_flush
 extern void tlb_flush(struct mmu_gather *tlb);
 
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#define tlb_needs_table_invalidate()	radix_enabled()
+#endif
+
 /* Get the generic bits... */
 #include <asm-generic/tlb.h>
 
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index eb24cb1afc11..18e9fb6fcf1b 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -65,7 +65,6 @@ config SPARC64
 	select HAVE_KRETPROBES
 	select HAVE_KPROBES
 	select HAVE_RCU_TABLE_FREE if SMP
-	select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_DYNAMIC_FTRACE
diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h
index a2f3fa61ee36..8cb8f3833239 100644
--- a/arch/sparc/include/asm/tlb_64.h
+++ b/arch/sparc/include/asm/tlb_64.h
@@ -28,6 +28,15 @@ void flush_tlb_pending(void);
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 #define tlb_flush(tlb)	flush_tlb_pending()
 
+/*
+ * SPARC64's hardware TLB fill does not use the Linux page-tables
+ * and therefore we don't need a TLBI when freeing page-table pages.
+ */
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#define tlb_needs_table_invalidate()	(false)
+#endif
+
 #include <asm-generic/tlb.h>
 
 #endif /* _SPARC64_TLB_H */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 2b10036fefd0..dcdf13fc0a0b 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -137,13 +137,6 @@
  *  When used, an architecture is expected to provide __tlb_remove_table()
  *  which does the actual freeing of these pages.
  *
- *  HAVE_RCU_TABLE_NO_INVALIDATE
- *
- *  This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
- *  freeing the page-table pages. This can be avoided if you use
- *  HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
- *  page-tables natively.
- *
  *  MMU_GATHER_NO_RANGE
  *
  *  Use this if your architecture lacks an efficient flush_tlb_range().
@@ -189,8 +182,23 @@ struct mmu_table_batch {
 
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
+/*
+ * This allows an architecture that does not use the linux page-tables for
+ * hardware to skip the TLBI when freeing page tables.
+ */
+#ifndef tlb_needs_table_invalidate
+#define tlb_needs_table_invalidate() (true)
+#endif
+
+#else
+
+#ifdef tlb_needs_table_invalidate
+#error tlb_needs_table_invalidate() requires HAVE_RCU_TABLE_FREE
 #endif
 
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
+
 #ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 /*
  * If we can't allocate a page to make a big batch of page pointers
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 7d70e5c78f97..7c1b8f67af7b 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -102,14 +102,14 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
  */
 static inline void tlb_table_invalidate(struct mmu_gather *tlb)
 {
-#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
-	/*
-	 * Invalidate page-table caches used by hardware walkers. Then we still
-	 * need to RCU-sched wait while freeing the pages because software
-	 * walkers can still be in-flight.
-	 */
-	tlb_flush_mmu_tlbonly(tlb);
-#endif
+	if (tlb_needs_table_invalidate()) {
+		/*
+		 * Invalidate page-table caches used by hardware walkers. Then
+		 * we still need to RCU-sched wait while freeing the pages
+		 * because software walkers can still be in-flight.
+		 */
+		tlb_flush_mmu_tlbonly(tlb);
+	}
 }
 
 static void tlb_remove_table_smp_sync(void *arg)
-- 
2.23.0


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [RFC PATCH 2/2] mm/mmu_gather: Avoid multiple page walk cache flush
  2019-12-17  7:16 [RFC PATCH 1/2] mm/mmu_gather: Invalidate TLB correctly on batch allocation failure and flush Aneesh Kumar K.V
@ 2019-12-17  7:16 ` Aneesh Kumar K.V
  0 siblings, 0 replies; 6+ messages in thread
From: Aneesh Kumar K.V @ 2019-12-17  7:16 UTC (permalink / raw)
  To: akpm, npiggin, mpe; +Cc: linux-mm, linux-kernel, linuxppc-dev, Aneesh Kumar K.V

On tlb_finish_mmu() the kernel does a TLB flush before the mmu gather
table invalidate. The mmu gather table invalidate, depending on the
kernel config, also does another TLBI. Avoid the latter on
tlb_finish_mmu().
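
[ Rough sketch of the call chain in question, simplified from
  mm/mmu_gather.c; the exact path depends on the configured options: ]

	tlb_finish_mmu()
	  tlb_flush_mmu()
	    tlb_flush_mmu_tlbonly()          <- TLB flush for the unmapped range
	    tlb_flush_mmu_free()
	      tlb_table_flush()
	        tlb_table_invalidate()
	          tlb_flush_mmu_tlbonly()    <- second TLBI, avoided by this patch
	      tlb_batch_pages_flush()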

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 mm/mmu_gather.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 7c1b8f67af7b..7e2bd43b9084 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -143,17 +143,23 @@ static void tlb_remove_table_rcu(struct rcu_head *head)
 	free_page((unsigned long)batch);
 }
 
-static void tlb_table_flush(struct mmu_gather *tlb)
+static void __tlb_table_flush(struct mmu_gather *tlb, bool table_inval)
 {
 	struct mmu_table_batch **batch = &tlb->batch;
 
 	if (*batch) {
-		tlb_table_invalidate(tlb);
+		if (table_inval)
+			tlb_table_invalidate(tlb);
 		call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
 		*batch = NULL;
 	}
 }
 
+static void tlb_table_flush(struct mmu_gather *tlb)
+{
+	__tlb_table_flush(tlb, true);
+}
+
 void tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
 	struct mmu_table_batch **batch = &tlb->batch;
@@ -178,7 +184,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb_table_flush(tlb);
+	__tlb_table_flush(tlb, false);
 #endif
 #ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 	tlb_batch_pages_flush(tlb);
-- 
2.23.0


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [RFC PATCH 2/2] mm/mmu_gather: Avoid multiple page walk cache flush
  2019-12-17 10:15     ` Aneesh Kumar K.V
@ 2019-12-17 12:34       ` Peter Zijlstra
  0 siblings, 0 replies; 6+ messages in thread
From: Peter Zijlstra @ 2019-12-17 12:34 UTC (permalink / raw)
  To: Aneesh Kumar K.V; +Cc: akpm, npiggin, mpe, linux-mm, linux-kernel, linuxppc-dev

On Tue, Dec 17, 2019 at 03:45:36PM +0530, Aneesh Kumar K.V wrote:
> On 12/17/19 2:28 PM, Peter Zijlstra wrote:
> > On Tue, Dec 17, 2019 at 12:47:13PM +0530, Aneesh Kumar K.V wrote:
> > > On tlb_finish_mmu() the kernel does a TLB flush before the mmu gather table
> > > invalidate. The mmu gather table invalidate, depending on the kernel config,
> > > also does another TLBI. Avoid the latter on tlb_finish_mmu().
> > 
> > That is already avoided, if you look at tlb_flush_mmu_tlbonly() it does
> > __tlb_reset_range(), which results in ->end = 0, which then triggers the
> > early exit on the next invocation:
> > 
> > 	if (!tlb->end)
> > 		return;
> > 
> 
> Is that true for tlb->fullmm flush?

Hmm, no, but I'm thinking your patch is broken, even for that case. We
must issue the TLBI before call_rcu().

Perhaps if we replace !tlb->end with something like:

  !tlb->freed_tables && !tlb->cleared_p*

(which GCC should be able to do with a single load and mask)

I've not really thought too hard about it yet, I need to run some
errands, but I'll look at it more closely when I get back.
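
[ For illustration, the suggested check might look roughly like the
  untested sketch below; freed_tables and the cleared_* members are
  the bitfields in struct mmu_gather: ]

	static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
	{
		/*
		 * Nothing was unmapped and no page-table pages were
		 * freed: there is nothing to invalidate.
		 */
		if (!(tlb->freed_tables || tlb->cleared_ptes ||
		      tlb->cleared_pmds || tlb->cleared_puds ||
		      tlb->cleared_p4ds))
			return;

		tlb_flush(tlb);
		mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
		__tlb_reset_range(tlb);
	}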

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [RFC PATCH 2/2] mm/mmu_gather: Avoid multiple page walk cache flush
  2019-12-17  8:58   ` Peter Zijlstra
@ 2019-12-17 10:15     ` Aneesh Kumar K.V
  2019-12-17 12:34       ` Peter Zijlstra
  0 siblings, 1 reply; 6+ messages in thread
From: Aneesh Kumar K.V @ 2019-12-17 10:15 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: akpm, npiggin, mpe, linux-mm, linux-kernel, linuxppc-dev

On 12/17/19 2:28 PM, Peter Zijlstra wrote:
> On Tue, Dec 17, 2019 at 12:47:13PM +0530, Aneesh Kumar K.V wrote:
>> On tlb_finish_mmu() the kernel does a TLB flush before the mmu gather table
>> invalidate. The mmu gather table invalidate, depending on the kernel config,
>> also does another TLBI. Avoid the latter on tlb_finish_mmu().
> 
> That is already avoided, if you look at tlb_flush_mmu_tlbonly() it does
> __tlb_reset_range(), which results in ->end = 0, which then triggers the
> early exit on the next invocation:
> 
> 	if (!tlb->end)
> 		return;
> 

Is that true for tlb->fullmm flush?

-aneesh

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [RFC PATCH 2/2] mm/mmu_gather: Avoid multiple page walk cache flush
  2019-12-17  7:17 ` [RFC PATCH 2/2] mm/mmu_gather: Avoid multiple page walk cache flush Aneesh Kumar K.V
@ 2019-12-17  8:58   ` Peter Zijlstra
  2019-12-17 10:15     ` Aneesh Kumar K.V
  0 siblings, 1 reply; 6+ messages in thread
From: Peter Zijlstra @ 2019-12-17  8:58 UTC (permalink / raw)
  To: Aneesh Kumar K.V; +Cc: akpm, npiggin, mpe, linux-mm, linux-kernel, linuxppc-dev

On Tue, Dec 17, 2019 at 12:47:13PM +0530, Aneesh Kumar K.V wrote:
> On tlb_finish_mmu() the kernel does a TLB flush before the mmu gather table
> invalidate. The mmu gather table invalidate, depending on the kernel config,
> also does another TLBI. Avoid the latter on tlb_finish_mmu().

That is already avoided, if you look at tlb_flush_mmu_tlbonly() it does
__tlb_reset_range(), which results in ->end = 0, which then triggers the
early exit on the next invocation:

	if (!tlb->end)
		return;
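
[ The mechanism referred to above, roughly; a simplified sketch of
  __tlb_reset_range() from include/asm-generic/tlb.h. Note the fullmm
  case, which keeps ->end non-zero, so the early exit does not trigger
  there: ]

	static inline void __tlb_reset_range(struct mmu_gather *tlb)
	{
		if (tlb->fullmm) {
			tlb->start = tlb->end = ~0;	/* ->end stays non-zero */
		} else {
			tlb->start = TASK_SIZE;
			tlb->end = 0;	/* next tlb_flush_mmu_tlbonly() exits early */
		}
		tlb->freed_tables = 0;
		tlb->cleared_ptes = 0;
		tlb->cleared_pmds = 0;
		tlb->cleared_puds = 0;
		tlb->cleared_p4ds = 0;
	}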

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [RFC PATCH 2/2] mm/mmu_gather: Avoid multiple page walk cache flush
  2019-12-17  7:17 [RFC PATCH 1/2] mm/mmu_gather: Invalidate TLB correctly on batch allocation failure and flush Aneesh Kumar K.V
@ 2019-12-17  7:17 ` Aneesh Kumar K.V
  2019-12-17  8:58   ` Peter Zijlstra
  0 siblings, 1 reply; 6+ messages in thread
From: Aneesh Kumar K.V @ 2019-12-17  7:17 UTC (permalink / raw)
  To: akpm, npiggin, mpe, peterz
  Cc: linux-mm, linux-kernel, linuxppc-dev, Aneesh Kumar K.V

On tlb_finish_mmu() the kernel does a TLB flush before the mmu gather
table invalidate. The mmu gather table invalidate, depending on the
kernel config, also does another TLBI. Avoid the latter on
tlb_finish_mmu().

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 mm/mmu_gather.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 7c1b8f67af7b..7e2bd43b9084 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -143,17 +143,23 @@ static void tlb_remove_table_rcu(struct rcu_head *head)
 	free_page((unsigned long)batch);
 }
 
-static void tlb_table_flush(struct mmu_gather *tlb)
+static void __tlb_table_flush(struct mmu_gather *tlb, bool table_inval)
 {
 	struct mmu_table_batch **batch = &tlb->batch;
 
 	if (*batch) {
-		tlb_table_invalidate(tlb);
+		if (table_inval)
+			tlb_table_invalidate(tlb);
 		call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
 		*batch = NULL;
 	}
 }
 
+static void tlb_table_flush(struct mmu_gather *tlb)
+{
+	__tlb_table_flush(tlb, true);
+}
+
 void tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
 	struct mmu_table_batch **batch = &tlb->batch;
@@ -178,7 +184,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb_table_flush(tlb);
+	__tlb_table_flush(tlb, false);
 #endif
 #ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 	tlb_batch_pages_flush(tlb);
-- 
2.23.0


^ permalink raw reply related	[flat|nested] 6+ messages in thread

end of thread

Thread overview: 6+ messages
2019-12-17  7:16 [RFC PATCH 1/2] mm/mmu_gather: Invalidate TLB correctly on batch allocation failure and flush Aneesh Kumar K.V
2019-12-17  7:16 ` [RFC PATCH 2/2] mm/mmu_gather: Avoid multiple page walk cache flush Aneesh Kumar K.V
2019-12-17  7:17 [RFC PATCH 1/2] mm/mmu_gather: Invalidate TLB correctly on batch allocation failure and flush Aneesh Kumar K.V
2019-12-17  7:17 ` [RFC PATCH 2/2] mm/mmu_gather: Avoid multiple page walk cache flush Aneesh Kumar K.V
2019-12-17  8:58   ` Peter Zijlstra
2019-12-17 10:15     ` Aneesh Kumar K.V
2019-12-17 12:34       ` Peter Zijlstra
