From: Michal Hocko <mhocko@kernel.org>
To: Minchan Kim <minchan@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	will.deacon@arm.com, Bob Liu <liubo95@huawei.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	David Rientjes <rientjes@google.com>,
	Ingo Molnar <mingo@kernel.org>, Roman Gushchin <guro@fb.com>,
	Konstantin Khlebnikov <khlebnikov@yandex-team.ru>,
	Andrea Arcangeli <aarcange@redhat.com>
Subject: Re: [RESEND PATCH] mm, oom_reaper: gather each vma to prevent leaking TLB entry
Date: Fri, 10 Nov 2017 11:15:29 +0100	[thread overview]
Message-ID: <20171110101529.op6yaxtdke2p4bsh@dhcp22.suse.cz> (raw)
In-Reply-To: <20171110001933.GA12421@bbox>

On Fri 10-11-17 09:19:33, Minchan Kim wrote:
> On Tue, Nov 07, 2017 at 09:54:53AM +0000, Wang Nan wrote:
> > tlb_gather_mmu(&tlb, mm, 0, -1) means gathering the whole virtual memory
> > space. In this case, tlb->fullmm is true. Some archs like arm64 don't
> > flush the TLB when tlb->fullmm is true:
> > 
> >   commit 5a7862e83000 ("arm64: tlbflush: avoid flushing when fullmm == 1").
> > 
> > This leaks stale TLB entries.
> 
> That means soft-dirty, which has used tlb_gather_mmu with fullmm, could be
> broken by losing the write-protection bit once it supports arm64 in the
> future?
> 
> If so, it would be better to use TASK_SIZE rather than -1 in tlb_gather_mmu.
> Of course, that's off-topic.

I wouldn't play tricks like that. Maybe the API itself could be more
explicit, e.g. add a lazy parameter which would allow arch-specific code
to skip the flush if it is sure that nobody can actually stumble over a
missed flush. E.g. the following?

diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index d5562f9ce600..fe9042aee8e9 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -149,7 +149,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 
 static inline void
 arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-			unsigned long start, unsigned long end)
+			unsigned long start, unsigned long end,
+			bool lazy)
 {
 	tlb->mm = mm;
 	tlb->fullmm = !(start | (end+1));
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index ffdaea7954bb..7adde19b2bcc 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -43,7 +43,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 	 * The ASID allocator will either invalidate the ASID or mark
 	 * it as used.
 	 */
-	if (tlb->fullmm)
+	if (tlb->lazy)
 		return;
 
 	/*
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index cbe5ac3699bf..50c440f5b7bc 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -169,7 +169,8 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 static inline void
 arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-			unsigned long start, unsigned long end)
+			unsigned long start, unsigned long end,
+			bool lazy)
 {
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 2eb8ff0d6fca..2310657b64c4 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -49,7 +49,8 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
 static inline void
 arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-			unsigned long start, unsigned long end)
+			unsigned long start, unsigned long end,
+			bool lazy)
 {
 	tlb->mm = mm;
 	tlb->start = start;
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 51a8bc967e75..ae4c50a7c1ec 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -37,7 +37,7 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 
 static inline void
 arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-		unsigned long start, unsigned long end)
+		unsigned long start, unsigned long end, bool lazy)
 {
 	tlb->mm = mm;
 	tlb->start = start;
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 344d95619d03..f24af66d07a4 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -46,7 +46,7 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 
 static inline void
 arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-		unsigned long start, unsigned long end)
+		unsigned long start, unsigned long end, bool lazy)
 {
 	tlb->mm = mm;
 	tlb->start = start;
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index faddde44de8c..c312fcd5a953 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -102,6 +102,7 @@ struct mmu_gather {
 	/* we have performed an operation which
 	 * requires a complete flush of the tlb */
 				need_flush_all : 1;
+				lazy : 1;
 
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch	local;
@@ -113,7 +114,8 @@ struct mmu_gather {
 #define HAVE_GENERIC_MMU_GATHER
 
 void arch_tlb_gather_mmu(struct mmu_gather *tlb,
-	struct mm_struct *mm, unsigned long start, unsigned long end);
+	struct mm_struct *mm, unsigned long start, unsigned long end,
+	bool lazy);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 			 unsigned long start, unsigned long end, bool force);
diff --git a/mm/memory.c b/mm/memory.c
index 590709e84a43..7dfdd4d8224f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -218,13 +218,15 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
 }
 
 void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-				unsigned long start, unsigned long end)
+				unsigned long start, unsigned long end,
+				bool lazy)
 {
 	tlb->mm = mm;
 
 	/* Is it from 0 to ~0? */
 	tlb->fullmm     = !(start | (end+1));
 	tlb->need_flush_all = 0;
+	tlb->lazy	= lazy;
 	tlb->local.next = NULL;
 	tlb->local.nr   = 0;
 	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
@@ -408,7 +410,18 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 			unsigned long start, unsigned long end)
 {
-	arch_tlb_gather_mmu(tlb, mm, start, end);
+	arch_tlb_gather_mmu(tlb, mm, start, end, false);
+	inc_tlb_flush_pending(tlb->mm);
+}
+
+/* tlb_gather_mmu_lazy
+ * 	Basically same as tlb_gather_mmu except it allows architectures to
+ * 	skip tlb flushing if they can ensure that nobody will reuse tlb entries
+ */
+void tlb_gather_mmu_lazy(struct mmu_gather *tlb, struct mm_struct *mm,
+			unsigned long start, unsigned long end)
+{
+	arch_tlb_gather_mmu(tlb, mm, start, end, true);
 	inc_tlb_flush_pending(tlb->mm);
 }
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 680506faceae..43594a6a2eac 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2997,7 +2997,7 @@ void exit_mmap(struct mm_struct *mm)
 
 	lru_add_drain();
 	flush_cache_mm(mm);
-	tlb_gather_mmu(&tlb, mm, 0, -1);
+	tlb_gather_mmu_lazy(&tlb, mm, 0, -1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
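
Just for illustration (this is not part of the diff above), a caller
which tears down the whole address space would then do something along
these lines, while ranged unmaps keep using the flushing tlb_gather_mmu:

/*
 * Illustration only -- assumes the tlb_gather_mmu_lazy() interface from
 * the diff above; whole_mm_teardown() is a made-up helper rather than
 * an existing kernel function.
 */
static void whole_mm_teardown(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct mmu_gather tlb;

	/*
	 * The address space is going away and nobody can fault on it
	 * anymore, so the architecture may skip the final TLB flush.
	 */
	tlb_gather_mmu_lazy(&tlb, mm, 0, -1);
	unmap_vmas(&tlb, vma, 0, -1);
	tlb_finish_mmu(&tlb, 0, -1);
}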

> However, I want to add a big fat comment in tlb_gather_mmu to warn "TLB
> flushing with (0, -1) can be skipped on some architectures" so upcoming
> users can take care.

Yes, this would be really useful if the API is not made explicit.
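
Something along these lines would do - just a rough sketch on top of the
current generic tlb_gather_mmu (i.e. without the lazy parameter):

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	/*
	 * Beware: the full (0, -1) range sets tlb->fullmm and some
	 * architectures (e.g. arm64) skip the TLB flush entirely in
	 * that case. Only use it when nobody can possibly access the
	 * address space afterwards.
	 */
	arch_tlb_gather_mmu(tlb, mm, start, end);
	inc_tlb_flush_pending(tlb->mm);
}
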
-- 
Michal Hocko
SUSE Labs

Thread overview:
2017-11-07  9:54 [RESEND PATCH] mm, oom_reaper: gather each vma to prevent leaking TLB entry Wang Nan
2017-11-07 10:09 ` Michal Hocko
2017-11-10  0:19 ` Minchan Kim
2017-11-10 10:15   ` Michal Hocko [this message]
2017-11-10 12:26     ` [PATCH] arch, mm: introduce arch_tlb_gather_mmu_lazy (was: Re: [RESEND PATCH] mm, oom_reaper: gather each vma to prevent) " Michal Hocko
2017-11-13  0:28       ` Minchan Kim
2017-11-13  9:51         ` Michal Hocko
2017-11-14  1:45           ` Minchan Kim
2017-11-14  7:21             ` Michal Hocko
2017-11-15  0:12               ` Minchan Kim
2017-11-15  8:14         ` Michal Hocko
2017-11-16  0:44           ` Minchan Kim
2017-11-16  9:19             ` Michal Hocko
2017-11-15 17:33       ` Will Deacon
2017-11-16  9:20         ` Michal Hocko
2017-11-20 14:24           ` Will Deacon
2017-11-20 16:04             ` [PATCH] arch, mm: introduce arch_tlb_gather_mmu_lazy Michal Hocko
2017-11-22 19:30               ` Will Deacon
2017-11-23  6:18                 ` Minchan Kim
