From: Nadav Amit <nadav.amit@gmail.com>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: Nadav Amit <namit@vmware.com>,
Andrea Arcangeli <aarcange@redhat.com>,
Andrew Morton <akpm@linux-foundation.org>,
Andy Lutomirski <luto@kernel.org>,
Dave Hansen <dave.hansen@linux.intel.com>,
Peter Zijlstra <peterz@infradead.org>,
Thomas Gleixner <tglx@linutronix.de>,
Will Deacon <will@kernel.org>, Yu Zhao <yuzhao@google.com>,
Nick Piggin <npiggin@gmail.com>,
x86@kernel.org
Subject: [RFC 02/20] mm/mprotect: use mmu_gather
Date: Sat, 30 Jan 2021 16:11:14 -0800
Message-ID: <20210131001132.3368247-3-namit@vmware.com>
In-Reply-To: <20210131001132.3368247-1-namit@vmware.com>
From: Nadav Amit <namit@vmware.com>
change_pXX_range() currently does not use mmu_gather, but instead
implements its own deferred TLB flush scheme. This both complicates the
code, as developers need to be aware of different invalidation schemes,
and prevents opportunities to avoid TLB flushes or perform them in finer
granularity.
Use mmu_gather in change_pXX_range(). As the pages are not released,
only record the flushed range using tlb_flush_pXX_range().
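To illustrate the resulting scheme, here is a minimal sketch (not part of
the patch; the function name is made up for illustration) of how the
mmu_gather API is used once this change is applied: the walk is wrapped in
tlb_gather_mmu()/tlb_finish_mmu(), and the leaf code only records the
modified range, leaving the actual invalidation to tlb_end_vma() and
tlb_finish_mmu():

	/*
	 * Sketch only: mirrors the flow in the diff below for a single PTE.
	 * The helper name is hypothetical and is not added by this patch.
	 */
	static void sketch_mprotect_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *pte,
					  pte_t oldpte, pte_t ptent)
	{
		struct mmu_gather tlb;

		tlb_gather_mmu(&tlb, vma->vm_mm); /* replaces inc_tlb_flush_pending() */
		tlb_start_vma(&tlb, vma);         /* handles flush_cache_range() etc. */

		ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
		tlb_flush_pte_range(&tlb, addr, PAGE_SIZE); /* record; no flush yet */

		tlb_end_vma(&tlb, vma);   /* deferred flush of the recorded range */
		tlb_finish_mmu(&tlb);     /* replaces dec_tlb_flush_pending() */
	}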
Signed-off-by: Nadav Amit <namit@vmware.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: x86@kernel.org
---
include/linux/huge_mm.h | 3 ++-
mm/huge_memory.c | 4 +++-
mm/mprotect.c | 51 ++++++++++++++++++++---------------------
3 files changed, 30 insertions(+), 28 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 6a19f35f836b..6eff7f59a778 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -37,7 +37,8 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
- pgprot_t newprot, unsigned long cp_flags);
+ pgprot_t newprot, unsigned long cp_flags,
+ struct mmu_gather *tlb);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
pgprot_t pgprot, bool write);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9237976abe72..c345b8b06183 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1797,7 +1797,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
* - HPAGE_PMD_NR is protections changed and TLB flush necessary
*/
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, pgprot_t newprot, unsigned long cp_flags)
+ unsigned long addr, pgprot_t newprot, unsigned long cp_flags,
+ struct mmu_gather *tlb)
{
struct mm_struct *mm = vma->vm_mm;
spinlock_t *ptl;
@@ -1885,6 +1886,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
entry = pmd_clear_uffd_wp(entry);
}
ret = HPAGE_PMD_NR;
+ tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
set_pmd_at(mm, addr, pmd, entry);
BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
unlock:
diff --git a/mm/mprotect.c b/mm/mprotect.c
index ab709023e9aa..632d5a677d3f 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -32,12 +32,13 @@
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+#include <asm/tlb.h>
#include "internal.h"
-static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, unsigned long end, pgprot_t newprot,
- unsigned long cp_flags)
+static unsigned long change_pte_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
+ unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
pte_t *pte, oldpte;
spinlock_t *ptl;
@@ -138,6 +139,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
ptent = pte_mkwrite(ptent);
}
ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
+ tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
pages++;
} else if (is_swap_pte(oldpte)) {
swp_entry_t entry = pte_to_swp_entry(oldpte);
@@ -209,9 +211,9 @@ static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
return 0;
}
-static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
- pud_t *pud, unsigned long addr, unsigned long end,
- pgprot_t newprot, unsigned long cp_flags)
+static inline unsigned long change_pmd_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
+ unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
pmd_t *pmd;
unsigned long next;
@@ -252,7 +254,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
__split_huge_pmd(vma, pmd, addr, false, NULL);
} else {
int nr_ptes = change_huge_pmd(vma, pmd, addr,
- newprot, cp_flags);
+ newprot, cp_flags, tlb);
if (nr_ptes) {
if (nr_ptes == HPAGE_PMD_NR) {
@@ -266,8 +268,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
}
/* fall through, the trans huge pmd just split */
}
- this_pages = change_pte_range(vma, pmd, addr, next, newprot,
- cp_flags);
+ this_pages = change_pte_range(tlb, vma, pmd, addr, next,
+ newprot, cp_flags);
pages += this_pages;
next:
cond_resched();
@@ -281,9 +283,9 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
return pages;
}
-static inline unsigned long change_pud_range(struct vm_area_struct *vma,
- p4d_t *p4d, unsigned long addr, unsigned long end,
- pgprot_t newprot, unsigned long cp_flags)
+static inline unsigned long change_pud_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
+ unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
pud_t *pud;
unsigned long next;
@@ -294,16 +296,16 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma,
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
- pages += change_pmd_range(vma, pud, addr, next, newprot,
+ pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
cp_flags);
} while (pud++, addr = next, addr != end);
return pages;
}
-static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
- pgd_t *pgd, unsigned long addr, unsigned long end,
- pgprot_t newprot, unsigned long cp_flags)
+static inline unsigned long change_p4d_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
+ unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
p4d_t *p4d;
unsigned long next;
@@ -314,7 +316,7 @@ static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
next = p4d_addr_end(addr, end);
if (p4d_none_or_clear_bad(p4d))
continue;
- pages += change_pud_range(vma, p4d, addr, next, newprot,
+ pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
cp_flags);
} while (p4d++, addr = next, addr != end);
@@ -328,25 +330,22 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd;
unsigned long next;
- unsigned long start = addr;
unsigned long pages = 0;
+ struct mmu_gather tlb;
BUG_ON(addr >= end);
pgd = pgd_offset(mm, addr);
- flush_cache_range(vma, addr, end);
- inc_tlb_flush_pending(mm);
+ tlb_gather_mmu(&tlb, mm);
+ tlb_start_vma(&tlb, vma);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- pages += change_p4d_range(vma, pgd, addr, next, newprot,
+ pages += change_p4d_range(&tlb, vma, pgd, addr, next, newprot,
cp_flags);
} while (pgd++, addr = next, addr != end);
-
- /* Only flush the TLB if we actually modified any entries: */
- if (pages)
- flush_tlb_range(vma, start, end);
- dec_tlb_flush_pending(mm);
+ tlb_end_vma(&tlb, vma);
+ tlb_finish_mmu(&tlb);
return pages;
}
--
2.25.1