From: Peter Zijlstra <peterz@infradead.org>
To: will.deacon@arm.com, aneesh.kumar@linux.vnet.ibm.com,
	akpm@linux-foundation.org, npiggin@gmail.com
Cc: linux-arch@vger.kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org, peterz@infradead.org,
	linux@armlinux.org.uk, heiko.carstens@de.ibm.com,
	riel@surriel.com
Subject: [PATCH v6 04/18] asm-generic/tlb: Provide generic tlb_flush() based on flush_tlb_range()
Date: Tue, 19 Feb 2019 11:31:52 +0100
Message-ID: <20190219103233.090636436@infradead.org>
In-Reply-To: <20190219103148.192029670@infradead.org>

Provide a generic tlb_flush() implementation that relies on
flush_tlb_range(). This is a little awkward because flush_tlb_range()
assumes a VMA for range invalidation, but we no longer have one.

An audit of all flush_tlb_range() implementations shows that only
vma->vm_mm and vma->vm_flags are ever used, and of the latter only
VM_EXEC (I-TLB invalidates) and VM_HUGETLB (large-page TLB invalidates).
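
To illustrate the audit result (this is not code from the patch): such
an implementation reads only vma->vm_mm and those two flag bits, so a
stack-constructed VMA carrying just these fields is sufficient. A
minimal sketch, where flush_range_dtlb()/flush_range_itlb() are
hypothetical per-arch helpers:

	/* Sketch only; the helpers below are hypothetical, not real arch code. */
	void flush_tlb_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
	{
		/* Only ->vm_mm and ->vm_flags are ever dereferenced. */
		flush_range_dtlb(vma->vm_mm, start, end);
		if (vma->vm_flags & VM_EXEC)	/* I-TLB as well */
			flush_range_itlb(vma->vm_mm, start, end);
	}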

Therefore, track VM_EXEC and VM_HUGETLB in two more bits, and create a
'fake' VMA.

This allows architectures with a reasonably efficient
flush_tlb_range() to get the generic behaviour without any additional
effort.
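
For context, the opt-out in the arch hunks below uses the usual
define-the-symbol-to-itself convention; asm-generic/tlb.h then compiles
its flush_tlb_range()-based fallback only when the symbol is absent. A
condensed sketch of the mechanism (with 'foo' as a made-up example
architecture, not a literal copy of the hunks):

	/* arch/foo/include/asm/tlb.h */
	#define tlb_flush tlb_flush
	static inline void tlb_flush(struct mmu_gather *tlb);
	#include <asm-generic/tlb.h>

	/* include/asm-generic/tlb.h */
	#ifndef tlb_flush
	static inline void tlb_flush(struct mmu_gather *tlb)
	{
		/* generic flush_tlb_mm() / flush_tlb_range() based body */
	}
	#endif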

Cc: Nick Piggin <npiggin@gmail.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/arm64/include/asm/tlb.h   |    1 
 arch/powerpc/include/asm/tlb.h |    1 
 arch/riscv/include/asm/tlb.h   |    1 
 arch/x86/include/asm/tlb.h     |    1 
 include/asm-generic/tlb.h      |   95 +++++++++++++++++++++++++++++++++++------
 5 files changed, 87 insertions(+), 12 deletions(-)

--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -27,6 +27,7 @@ static inline void __tlb_remove_table(vo
 	free_page_and_swap_cache((struct page *)_table);
 }
 
+#define tlb_flush tlb_flush
 static void tlb_flush(struct mmu_gather *tlb);
 
 #include <asm-generic/tlb.h>
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -28,6 +28,7 @@
 #define tlb_end_vma(tlb, vma)	do { } while (0)
 #define __tlb_remove_tlb_entry	__tlb_remove_tlb_entry
 
+#define tlb_flush tlb_flush
 extern void tlb_flush(struct mmu_gather *tlb);
 
 /* Get the generic bits... */
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -18,6 +18,7 @@ struct mmu_gather;
 
 static void tlb_flush(struct mmu_gather *tlb);
 
+#define tlb_flush tlb_flush
 #include <asm-generic/tlb.h>
 
 static inline void tlb_flush(struct mmu_gather *tlb)
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -6,6 +6,7 @@
 #define tlb_end_vma(tlb, vma) do { } while (0)
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 
+#define tlb_flush tlb_flush
 static inline void tlb_flush(struct mmu_gather *tlb);
 
 #include <asm-generic/tlb.h>
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -95,7 +95,7 @@
  *    flush the entire TLB irrespective of the range. For instance
  *    x86-PAE needs this when changing top-level entries.
  *
- * And requires the architecture to provide and implement tlb_flush().
+ * And allows the architecture to provide and implement tlb_flush():
  *
  * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
  * use of:
@@ -111,7 +111,10 @@
  *
  *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
  *
- *    returns the smallest TLB entry size unmapped in this range
+ *    returns the smallest TLB entry size unmapped in this range.
+ *
+ * If an architecture does not provide tlb_flush() a default implementation
+ * based on flush_tlb_range() will be used.
  *
  * Additionally there are a few opt-in features:
  *
@@ -245,6 +248,12 @@ struct mmu_gather {
 	unsigned int		cleared_puds : 1;
 	unsigned int		cleared_p4ds : 1;
 
+	/*
+	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
+	 */
+	unsigned int		vma_exec : 1;
+	unsigned int		vma_huge : 1;
+
 	unsigned int		batch_count;
 
 	struct mmu_gather_batch *active;
@@ -286,8 +295,59 @@ static inline void __tlb_reset_range(str
 	tlb->cleared_pmds = 0;
 	tlb->cleared_puds = 0;
 	tlb->cleared_p4ds = 0;
+	/*
+	 * Do not reset mmu_gather::vma_* fields here, we do not
+	 * call into tlb_start_vma() again to set them if there is an
+	 * intermediate flush.
+	 */
 }
 
+#ifndef tlb_flush
+
+#if defined(tlb_start_vma) || defined(tlb_end_vma)
+#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
+#endif
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	if (tlb->fullmm || tlb->need_flush_all) {
+		flush_tlb_mm(tlb->mm);
+	} else if (tlb->end) {
+		struct vm_area_struct vma = {
+			.vm_mm = tlb->mm,
+			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
+				    (tlb->vma_huge ? VM_HUGETLB : 0),
+		};
+
+		flush_tlb_range(&vma, tlb->start, tlb->end);
+	}
+}
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	/*
+	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
+	 * mips-4k) flush only large pages.
+	 *
+	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
+	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
+	 * range.
+	 *
+	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
+	 * these values the batch is empty.
+	 */
+	tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
+	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
+}
+
+#else
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#endif
+
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	if (!tlb->end)
@@ -357,19 +417,30 @@ static inline unsigned long tlb_get_unma
  * the vmas are adjusted to only cover the region to be torn down.
  */
 #ifndef tlb_start_vma
-#define tlb_start_vma(tlb, vma)						\
-do {									\
-	if (!tlb->fullmm)						\
-		flush_cache_range(vma, vma->vm_start, vma->vm_end);	\
-} while (0)
+static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	if (tlb->fullmm)
+		return;
+
+	tlb_update_vma_flags(tlb, vma);
+	flush_cache_range(vma, vma->vm_start, vma->vm_end);
+}
 #endif
 
 #ifndef tlb_end_vma
-#define tlb_end_vma(tlb, vma)						\
-do {									\
-	if (!tlb->fullmm)						\
-		tlb_flush_mmu_tlbonly(tlb);				\
-} while (0)
+static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	if (tlb->fullmm)
+		return;
+
+	/*
+	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
+	 * the ranges growing with the unused space between consecutive VMAs,
+	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
+	 * this.
+	 */
+	tlb_flush_mmu_tlbonly(tlb);
+}
 #endif
 
 #ifndef __tlb_remove_tlb_entry



