From: Chih-En Lin <shiyn.lin@gmail.com>
To: Andrew Morton <akpm@linux-foundation.org>,
Qi Zheng <zhengqi.arch@bytedance.com>,
David Hildenbrand <david@redhat.com>,
"Matthew Wilcox (Oracle)" <willy@infradead.org>,
Christophe Leroy <christophe.leroy@csgroup.eu>,
John Hubbard <jhubbard@nvidia.com>, Nadav Amit <namit@vmware.com>,
Barry Song <baohua@kernel.org>,
Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
"H. Peter Anvin" <hpa@zytor.com>,
Steven Rostedt <rostedt@goodmis.org>,
Masami Hiramatsu <mhiramat@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Arnaldo Carvalho de Melo <acme@kernel.org>,
Mark Rutland <mark.rutland@arm.com>,
Alexander Shishkin <alexander.shishkin@linux.intel.com>,
Jiri Olsa <jolsa@kernel.org>, Namhyung Kim <namhyung@kernel.org>,
Ian Rogers <irogers@google.com>,
Adrian Hunter <adrian.hunter@intel.com>,
Yu Zhao <yuzhao@google.com>, Steven Barrett <steven@liquorix.net>,
Juergen Gross <jgross@suse.com>, Peter Xu <peterx@redhat.com>,
Kefeng Wang <wangkefeng.wang@huawei.com>,
Tong Tiangen <tongtiangen@huawei.com>,
Christoph Hellwig <hch@infradead.org>,
"Liam R. Howlett" <Liam.Howlett@Oracle.com>,
Yang Shi <shy828301@gmail.com>, Vlastimil Babka <vbabka@suse.cz>,
Alex Sierra <alex.sierra@amd.com>,
Vincent Whitchurch <vincent.whitchurch@axis.com>,
Anshuman Khandual <anshuman.khandual@arm.com>,
Li kunyu <kunyu@nfschina.com>, Liu Shixin <liushixin2@huawei.com>,
Hugh Dickins <hughd@google.com>, Minchan Kim <minchan@kernel.org>,
Joey Gouly <joey.gouly@arm.com>,
Chih-En Lin <shiyn.lin@gmail.com>, Michal Hocko <mhocko@suse.com>,
Suren Baghdasaryan <surenb@google.com>,
"Zach O'Keefe" <zokeefe@google.com>,
Gautam Menghani <gautammenghani201@gmail.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Mark Brown <broonie@kernel.org>,
"Eric W. Biederman" <ebiederm@xmission.com>,
Andrei Vagin <avagin@gmail.com>,
Shakeel Butt <shakeelb@google.com>,
Daniel Bristot de Oliveira <bristot@kernel.org>,
"Jason A. Donenfeld" <Jason@zx2c4.com>,
Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
Alexey Gladkov <legion@kernel.org>,
x86@kernel.org, linux-kernel@vger.kernel.org,
linux-fsdevel@vger.kernel.org, linux-mm@kvack.org,
linux-trace-kernel@vger.kernel.org,
linux-perf-users@vger.kernel.org,
Dinglan Peng <peng301@purdue.edu>,
Pedro Fonseca <pfonseca@purdue.edu>,
Jim Huang <jserv@ccns.ncku.edu.tw>,
Huichun Feng <foxhoundsk.tw@gmail.com>
Subject: [PATCH v5 05/17] mm: Handle COW-ed PTE during zapping
Date: Fri, 14 Apr 2023 22:23:29 +0800
Message-ID: <20230414142341.354556-6-shiyn.lin@gmail.com>
In-Reply-To: <20230414142341.354556-1-shiyn.lin@gmail.com>
To support the zap functionality for COW-ed PTEs, we need to zap the
entire PTE table each time instead of zapping pages partially.
Therefore, if the zap range covers the entire PTE table, we can handle
the de-accounting, remove the rmap, and so on. However, we must not
modify the entries while someone else still holds a reference to the
COW-ed PTE table. Only when the zapped process is the last one
referencing the COW-ed PTE table do we reuse it and do the normal
zapping.
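
As a rough sketch of the intended flow (simplified from the diff
below; locking, accounting, and TLB batching are omitted),
zap_pte_range() now picks one of three paths when MMF_COW_PTE is set
and the pmd entry is write-protected:

	if (test_bit(MMF_COW_PTE, &mm->flags) && !pmd_write(*pmd)) {
		if (!range_in_vma(vma, addr & PMD_MASK,
				  (addr + PMD_SIZE) & PMD_MASK)) {
			/* The range only partially covers the PTE
			 * table: we cannot zap the whole table, so
			 * break COW on this pmd and fall back to the
			 * normal per-page zap. */
			break_cow_pte(vma, pmd, addr);
		} else if (cow_pte_count(pmd) == 1) {
			/* Last user: make the pmd entry writable
			 * again and zap the entries normally. */
			set_pmd_at(mm, addr, pmd, pmd_mkwrite(*pmd));
		} else {
			/* Still shared: leave the entries untouched,
			 * only drop our reference and clear our pmd
			 * entry after the walk. */
			pte_details.flags |= ZAP_PTE_IS_SHARED;
		}
	}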
Signed-off-by: Chih-En Lin <shiyn.lin@gmail.com>
---
mm/memory.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 87 insertions(+), 5 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index f8a87a0fc382..7908e20f802a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -192,6 +192,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
+#ifdef CONFIG_COW_PTE
+ if (test_bit(MMF_COW_PTE, &tlb->mm->flags)) {
+ if (!pmd_none(*pmd) && !pmd_write(*pmd))
+ VM_WARN_ON(cow_pte_count(pmd) != 1);
+ }
+#endif
if (pmd_none_or_clear_bad(pmd))
continue;
free_pte_range(tlb, pmd, addr);
@@ -1656,6 +1662,7 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
#define ZAP_PTE_INIT 0x0000
#define ZAP_PTE_FORCE_FLUSH 0x0001
+#define ZAP_PTE_IS_SHARED 0x0002
struct zap_pte_details {
pte_t **pte;
@@ -1681,9 +1688,13 @@ zap_present_pte(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (unlikely(!should_zap_page(details, page)))
return 0;
- ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
+ if (pte_details->flags & ZAP_PTE_IS_SHARED)
+ ptent = ptep_get(pte);
+ else
+ ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
tlb_remove_tlb_entry(tlb, pte, addr);
- zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
+ if (!(pte_details->flags & ZAP_PTE_IS_SHARED))
+ zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
if (unlikely(!page))
return 0;
@@ -1767,8 +1778,10 @@ zap_nopresent_pte(struct mmu_gather *tlb, struct vm_area_struct *vma,
/* We should have covered all the swap entry types */
WARN_ON_ONCE(1);
}
- pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
- zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
+ if (!(pte_details->flags & ZAP_PTE_IS_SHARED)) {
+ pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
+ zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
+ }
}
static unsigned long zap_pte_range(struct mmu_gather *tlb,
@@ -1785,6 +1798,36 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
.flags = ZAP_PTE_INIT,
.pte = &pte,
};
+#ifdef CONFIG_COW_PTE
+ unsigned long orig_addr = addr;
+
+ if (test_bit(MMF_COW_PTE, &mm->flags) && !pmd_write(*pmd)) {
+ if (!range_in_vma(vma, addr & PMD_MASK,
+ (addr + PMD_SIZE) & PMD_MASK)) {
+			/*
+			 * We cannot promise that this COW-ed PTE table
+			 * will also be zapped with the rest of the
+			 * VMAs, so break COW PTE here.
+			 */
+ break_cow_pte(vma, pmd, addr);
+ } else {
+			/*
+			 * Free the batched memory before we handle the
+			 * COW-ed PTE table.
+			 */
+ tlb_flush_mmu(tlb);
+ end = (addr + PMD_SIZE) & PMD_MASK;
+ addr = addr & PMD_MASK;
+ start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ if (cow_pte_count(pmd) == 1) {
+ /* Reuse COW-ed PTE */
+ pmd_t new = pmd_mkwrite(*pmd);
+ set_pmd_at(tlb->mm, addr, pmd, new);
+ } else
+ pte_details.flags |= ZAP_PTE_IS_SHARED;
+ pte_unmap_unlock(start_pte, ptl);
+ }
+ }
+#endif
tlb_change_page_size(tlb, PAGE_SIZE);
again:
@@ -1828,7 +1871,16 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
*/
if (pte_details.flags & ZAP_PTE_FORCE_FLUSH) {
pte_details.flags &= ~ZAP_PTE_FORCE_FLUSH;
- tlb_flush_mmu(tlb);
+		/*
+		 * With a COW-ed PTE table, defer freeing the batched memory
+		 * until after the COW-ed PTE table's pmd entry has actually
+		 * been cleared. Otherwise, if we are the only ones still
+		 * referencing the COW-ed PTE table when the batched memory
+		 * is freed, the page table check will report a bug with
+		 * anon_map_count != 0 in page_table_check_zero().
+		 */
+ if (!(pte_details.flags & ZAP_PTE_IS_SHARED))
+ tlb_flush_mmu(tlb);
}
if (addr != end) {
@@ -1836,6 +1888,36 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
goto again;
}
+#ifdef CONFIG_COW_PTE
+ if (pte_details.flags & ZAP_PTE_IS_SHARED) {
+ start_pte = pte_offset_map_lock(mm, pmd, orig_addr, &ptl);
+ if (!pmd_put_pte(pmd)) {
+ pmd_t new = pmd_mkwrite(*pmd);
+ set_pmd_at(tlb->mm, addr, pmd, new);
+			/*
+			 * We are the only ones still referencing this
+			 * table. Clear the page table check before we
+			 * free the batched memory.
+			 */
+ page_table_check_pte_clear_range(mm, orig_addr, *pmd);
+ pte_unmap_unlock(start_pte, ptl);
+ /* free the batched memory and flush the TLB. */
+ tlb_flush_mmu(tlb);
+ free_pte_range(tlb, pmd, addr);
+ } else {
+ pmd_clear(pmd);
+ pte_unmap_unlock(start_pte, ptl);
+ mm_dec_nr_ptes(tlb->mm);
+			/*
+			 * Someone is still referencing the table, so
+			 * just flush the TLB here.
+			 */
+ flush_tlb_range(vma, addr & PMD_MASK,
+ (addr + PMD_SIZE) & PMD_MASK);
+ }
+ }
+#endif
+
return addr;
}
--
2.34.1
Thread overview: 18+ messages
2023-04-14 14:23 [PATCH v5 00/17] Introduce Copy-On-Write to Page Table Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 01/17] mm: Split out the present cases from zap_pte_range() Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 02/17] mm: Allow user to control COW PTE via prctl Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 03/17] mm: Add Copy-On-Write PTE to fork() Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 04/17] mm: Add break COW PTE fault and helper functions Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 05/17] mm: Handle COW-ed PTE during zapping Chih-En Lin [this message]
2023-04-14 14:23 ` [PATCH v5 06/17] mm/rmap: Break COW PTE in rmap walking Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 07/17] mm/khugepaged: Break COW PTE before scanning pte Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 08/17] mm/ksm: Break COW PTE before modify shared PTE Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 09/17] mm/madvise: Handle COW-ed PTE with madvise() Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 10/17] mm/gup: Trigger break COW PTE before calling follow_pfn_pte() Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 11/17] mm/mprotect: Break COW PTE before changing protection Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 12/17] mm/userfaultfd: Support COW PTE Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 13/17] mm/migrate_device: " Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 14/17] fs/proc: Support COW PTE with clear_refs_write Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 15/17] events/uprobes: Break COW PTE before replacing page Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 16/17] mm: fork: Enable COW PTE to fork system call Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 17/17] mm: Check the unexpected modification of COW-ed PTE Chih-En Lin