From: Chih-En Lin <shiyn.lin@gmail.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Qi Zheng <zhengqi.arch@bytedance.com>,
	David Hildenbrand <david@redhat.com>,
	"Matthew Wilcox (Oracle)" <willy@infradead.org>,
	Christophe Leroy <christophe.leroy@csgroup.eu>,
	John Hubbard <jhubbard@nvidia.com>, Nadav Amit <namit@vmware.com>,
	Barry Song <baohua@kernel.org>,
	Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	"H. Peter Anvin" <hpa@zytor.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Masami Hiramatsu <mhiramat@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Arnaldo Carvalho de Melo <acme@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Alexander Shishkin <alexander.shishkin@linux.intel.com>,
	Jiri Olsa <jolsa@kernel.org>, Namhyung Kim <namhyung@kernel.org>,
	Ian Rogers <irogers@google.com>,
	Adrian Hunter <adrian.hunter@intel.com>,
	Yu Zhao <yuzhao@google.com>, Steven Barrett <steven@liquorix.net>,
	Juergen Gross <jgross@suse.com>, Peter Xu <peterx@redhat.com>,
	Kefeng Wang <wangkefeng.wang@huawei.com>,
	Tong Tiangen <tongtiangen@huawei.com>,
	Christoph Hellwig <hch@infradead.org>,
	"Liam R. Howlett" <Liam.Howlett@Oracle.com>,
	Yang Shi <shy828301@gmail.com>, Vlastimil Babka <vbabka@suse.cz>,
	Alex Sierra <alex.sierra@amd.com>,
	Vincent Whitchurch <vincent.whitchurch@axis.com>,
	Anshuman Khandual <anshuman.khandual@arm.com>,
	Li kunyu <kunyu@nfschina.com>, Liu Shixin <liushixin2@huawei.com>,
	Hugh Dickins <hughd@google.com>, Minchan Kim <minchan@kernel.org>,
	Joey Gouly <joey.gouly@arm.com>,
	Chih-En Lin <shiyn.lin@gmail.com>, Michal Hocko <mhocko@suse.com>,
	Suren Baghdasaryan <surenb@google.com>,
	"Zach O'Keefe" <zokeefe@google.com>,
	Gautam Menghani <gautammenghani201@gmail.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Mark Brown <broonie@kernel.org>,
	"Eric W. Biederman" <ebiederm@xmission.com>,
	Andrei Vagin <avagin@gmail.com>,
	Shakeel Butt <shakeelb@google.com>,
	Daniel Bristot de Oliveira <bristot@kernel.org>,
	"Jason A. Donenfeld" <Jason@zx2c4.com>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	Alexey Gladkov <legion@kernel.org>,
	x86@kernel.org, linux-kernel@vger.kernel.org,
	linux-fsdevel@vger.kernel.org, linux-mm@kvack.org,
	linux-trace-kernel@vger.kernel.org,
	linux-perf-users@vger.kernel.org,
	Dinglan Peng <peng301@purdue.edu>,
	Pedro Fonseca <pfonseca@purdue.edu>,
	Jim Huang <jserv@ccns.ncku.edu.tw>,
	Huichun Feng <foxhoundsk.tw@gmail.com>
Subject: [PATCH v5 04/17] mm: Add break COW PTE fault and helper functions
Date: Fri, 14 Apr 2023 22:23:28 +0800	[thread overview]
Message-ID: <20230414142341.354556-5-shiyn.lin@gmail.com> (raw)
In-Reply-To: <20230414142341.354556-1-shiyn.lin@gmail.com>

Add the function handle_cow_pte_fault() to break (unshare) a COW-ed
PTE table on a page fault that will modify the PTE table or a mapped
page residing in it (i.e., write, unshare, or file-backed read faults).

When breaking COW PTE, first check the COW-ed PTE table's refcount to
see whether it can be reused. If the COW-ed table cannot be reused,
allocate a new PTE table and duplicate all pte entries from the COW-ed
one. Also, flush the TLB whenever the write protection of the PTE
changes.
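
In rough outline, the fast/slow path decision looks like the following
sketch. This is a simplified, non-authoritative outline of the logic in
handle_cow_pte_fault() below; locking, the mmu notifier, and error
handling are elided, and copy_cowed_entries() is a hypothetical
stand-in for the duplication loop:

	if (cow_pte_count(pmd) == 1) {
		/* Fast path: we are the sole user, reuse the table. */
		set_pmd_at(mm, addr, pmd, pmd_mkwrite(*pmd));
	} else {
		/* Slow path: allocate a new table and copy all entries. */
		new_pte_table = pte_alloc_one(mm);
		pmd_populate(mm, &new_entry, new_pte_table);
		copy_cowed_entries(&new_entry, pmd);	/* hypothetical helper */
		pmd_put_pte(&cowed_entry);		/* drop our reference */
		set_pmd_at(mm, start, pmd, new_entry);
	}
	flush_tlb_range(vma, start, end);	/* protection changed */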

In addition, provide the helper functions break_cow_pte{,_range}() for
other features (remap, THP, migration, swapfile, etc.) to use.
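
As a minimal usage sketch (mirroring the unuse_pmd_range() hunk in this
patch), a caller that is about to modify entries under a pmd unshares
the COW-ed table first:

	/* Unshare the COW-ed PTE table before touching entries under pmd. */
	if (break_cow_pte(vma, pmd, addr))
		return -ENOMEM;
	/* The pte lock can now be taken and the entries modified safely. */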

Signed-off-by: Chih-En Lin <shiyn.lin@gmail.com>
---
 include/linux/mm.h      |  17 +++
 include/linux/pgtable.h |   6 +
 mm/memory.c             | 318 +++++++++++++++++++++++++++++++++++++++-
 mm/mmap.c               |   4 +
 mm/mremap.c             |   2 +
 mm/swapfile.c           |   2 +
 6 files changed, 348 insertions(+), 1 deletion(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 828f8a1b1e32..b4c9658ccd28 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2179,6 +2179,23 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
 int generic_error_remove_page(struct address_space *mapping, struct page *page);
 
+#ifdef CONFIG_COW_PTE
+int break_cow_pte(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr);
+int break_cow_pte_range(struct vm_area_struct *vma, unsigned long start,
+			unsigned long end);
+#else
+static inline int break_cow_pte(struct vm_area_struct *vma,
+				pmd_t *pmd, unsigned long addr)
+{
+	return 0;
+}
+static inline int break_cow_pte_range(struct vm_area_struct *vma,
+				      unsigned long start, unsigned long end)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_MMU
 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 				  unsigned long address, unsigned int flags,
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index c63cd44777ec..f177a9d48b70 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1378,6 +1378,12 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
 	if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
 		(IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
 		return 1;
+	/*
+	 * A COW-ed PTE table is write-protected, which can trigger pmd_bad().
+	 * To avoid this, return here if the entry is write-protected.
+	 */
+	if (!pmd_write(pmdval))
+		return 0;
 	if (unlikely(pmd_bad(pmdval))) {
 		pmd_clear_bad(pmd);
 		return 1;
diff --git a/mm/memory.c b/mm/memory.c
index 3b1c4a7e632c..f8a87a0fc382 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2166,6 +2166,8 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 	if (retval)
 		goto out;
 	retval = -ENOMEM;
+	if (break_cow_pte(vma, NULL, addr))
+		goto out;
 	pte = get_locked_pte(vma->vm_mm, addr, &ptl);
 	if (!pte)
 		goto out;
@@ -2425,6 +2427,9 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	pte_t *pte, entry;
 	spinlock_t *ptl;
 
+	if (break_cow_pte(vma, NULL, addr))
+		return VM_FAULT_OOM;
+
 	pte = get_locked_pte(mm, addr, &ptl);
 	if (!pte)
 		return VM_FAULT_OOM;
@@ -2802,6 +2807,10 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
 	pgd = pgd_offset(mm, addr);
+
+	if (break_cow_pte_range(vma, addr, end))
+		return -ENOMEM;
+
 	flush_cache_range(vma, addr, end);
 	do {
 		next = pgd_addr_end(addr, end);
@@ -5192,6 +5201,285 @@ static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
 	return VM_FAULT_FALLBACK;
 }
 
+#ifdef CONFIG_COW_PTE
+/*
+ * Break (unshare) COW PTE
+ *
+ * Since the pte lock is held during all operations on the COW-ed PTE
+ * table, it should be safe to modify its pmd entry as well, provided
+ * it has been ensured that the pmd entry points to a COW-ed PTE table
+ * rather than a huge page or default PTE. Otherwise, we should also
+ * consider holding the pmd lock as we do for the huge page.
+ */
+static vm_fault_t handle_cow_pte_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct mm_struct *mm = vma->vm_mm;
+	pmd_t *pmd = vmf->pmd;
+	unsigned long start, end, addr = vmf->address;
+	struct mmu_notifier_range range;
+	pmd_t new_entry, cowed_entry;
+	pte_t *orig_dst_pte, *orig_src_pte;
+	pte_t *dst_pte, *src_pte;
+	pgtable_t new_pte_table = NULL;
+	spinlock_t *src_ptl;
+	int ret = 0;
+
+	/* Do nothing if the fault doesn't have a PTE table yet. */
+	if (pmd_none(*pmd) || pmd_write(*pmd))
+		return 0;
+	/* COW PTE doesn't handle huge page. */
+	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
+		return 0;
+
+	start = addr & PMD_MASK;
+	end = (addr + PMD_SIZE) & PMD_MASK;
+	addr = start;
+
+	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
+				0, vma, mm, start, end);
+	/*
+	 * Because the address range covers the whole PTE table, not only
+	 * the faulted vma, there might be mismatches since the mmu
+	 * notifier only registers the faulted vma.
+	 * Do we really need to care about this kind of mismatch?
+	 */
+	mmu_notifier_invalidate_range_start(&range);
+	raw_write_seqcount_begin(&mm->write_protect_seq);
+
+	/*
+	 * Fast path: if ours is the only faulting task that references
+	 * this COW-ed PTE table, reuse it.
+	 */
+	src_pte = pte_offset_map(pmd, addr);
+	src_ptl = pte_lockptr(mm, pmd);
+	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+	if (cow_pte_count(pmd) == 1) {
+		pmd_t new = pmd_mkwrite(*pmd);
+		set_pmd_at(mm, addr, pmd, new);
+		pte_unmap_unlock(src_pte, src_ptl);
+		goto flush_tlb;
+	}
+	/* We don't hold the lock when allocating the new PTE. */
+	pte_unmap_unlock(src_pte, src_ptl);
+
+	/*
+	 * Slow path. Since the accounting is already done and the mapped
+	 * pages are still shared, we can just clone the PTE table.
+	 */
+
+	/*
+	 * Before acquiring the lock, we allocate the memory we may
+	 * possibly need.
+	 */
+	new_pte_table = pte_alloc_one(mm);
+	if (unlikely(!new_pte_table)) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * To protect the pte table from rmap and page table walks, we
+	 * should hold the COW-ed PTE table's lock until all operations
+	 * are done, including setting the pmd entry, duplicating, and
+	 * decreasing the refcount.
+	 */
+	orig_src_pte = src_pte = pte_offset_map(pmd, addr);
+	src_ptl = pte_lockptr(mm, pmd);
+	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+
+	/* Before populating the new pte table, store the COW-ed (old) entry. */
+	cowed_entry = READ_ONCE(*pmd);
+
+	/*
+	 * Someone else may have broken COW PTE while we were allocating
+	 * the pte table, so check the refcount again.
+	 */
+	if (cow_pte_count(&cowed_entry) == 1) {
+		pmd_t new = pmd_mkwrite(*pmd);
+		set_pmd_at(mm, addr, pmd, new);
+		pte_unmap_unlock(src_pte, src_ptl);
+		goto flush_tlb;
+	}
+
+	/*
+	 * We only set the new pte table in the pmd entry after all the
+	 * duplicating is finished.
+	 * We first store the new table in a separate pmd entry even though
+	 * we hold the COW-ed PTE table's lock. This is because, if we
+	 * cleared the pmd entry assigned to the COW-ed PTE table, other
+	 * places (e.g., another page fault) might allocate an empty PTE
+	 * table, leading to potential issues.
+	 */
+	pmd_clear(&new_entry);
+	pmd_populate(mm, &new_entry, new_pte_table);
+	/*
+	 * No one but us can access this new table, so we don't have to
+	 * hold the second pte lock.
+	 */
+	orig_dst_pte = dst_pte = pte_offset_map(&new_entry, addr);
+
+	arch_enter_lazy_mmu_mode();
+
+	/*
+	 * All mapped pages in the COW-ed PTE table are COW mappings. We
+	 * can just copy the entries and leave the rest to handle_pte_fault().
+	 */
+	do {
+		if (pte_none(*src_pte))
+			continue;
+		set_pte_at(mm, addr, dst_pte, *src_pte);
+	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
+
+	arch_leave_lazy_mmu_mode();
+
+	pte_unmap(orig_dst_pte);
+
+	/*
+	 * Decrease the refcount of the COW-ed PTE table.
+	 * On this path, we assume someone else is still using the COW-ed
+	 * PTE table, so a refcount of 1 before the decrease would
+	 * indicate something is wrong.
+	 */
+	VM_WARN_ON(!pmd_put_pte(&cowed_entry));
+	VM_WARN_ON(!pmd_same(*pmd, cowed_entry));
+
+	/* Now, we can finally install the new PTE table to the pmd entry. */
+	set_pmd_at(mm, start, pmd, new_entry);
+	/*
+	 * The new table is installed; clear the new_pte_table variable
+	 * so the pte_free() below won't free it.
+	 */
+	new_pte_table = NULL;
+	pte_unmap_unlock(orig_src_pte, src_ptl);
+
+flush_tlb:
+	/*
+	 * Since we changed the protection, flush the TLB.
+	 * flush_tlb_range() only uses vma to get mm, so the mismatch
+	 * between the address range and the vma is not a problem here.
+	 *
+	 * Should we flush TLB when holding the pte lock?
+	 */
+	flush_tlb_range(vma, start, end);
+out:
+	raw_write_seqcount_end(&mm->write_protect_seq);
+	mmu_notifier_invalidate_range_end(&range);
+
+	if (new_pte_table)
+		pte_free(mm, new_pte_table);
+
+	return ret;
+}
+
+static inline int __break_cow_pte(struct vm_area_struct *vma, pmd_t *pmd,
+				  unsigned long addr)
+{
+	struct vm_fault vmf = {
+		.vma = vma,
+		.address = addr & PAGE_MASK,
+		.pmd = pmd,
+	};
+
+	return handle_cow_pte_fault(&vmf);
+}
+
+/**
+ * break_cow_pte - duplicate/reuse a shared, write-protected (COW-ed) PTE table
+ * @vma: target vma for which to break COW
+ * @pmd: pmd entry that maps the shared PTE table
+ * @addr: the address that triggered breaking COW PTE
+ *
+ * Return: zero on success, < 0 otherwise.
+ *
+ * The address needs to be in the range of the shared, write-protected
+ * PTE table that the pmd entry maps. If pmd is NULL, the pmd is looked
+ * up from vma. The COW-ed PTE table is duplicated when others still map
+ * to it; otherwise, it is reused.
+ * If the first attempt fails, wait for some time and try again. If it
+ * fails again, invoke the OOM killer.
+ */
+int break_cow_pte(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr)
+{
+	struct mm_struct *mm;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	int ret = 0;
+
+	if (!vma)
+		return -EINVAL;
+	mm = vma->vm_mm;
+
+	if (!test_bit(MMF_COW_PTE, &mm->flags))
+		return 0;
+
+	if (!pmd) {
+		pgd = pgd_offset(mm, addr);
+		if (pgd_none_or_clear_bad(pgd))
+			return 0;
+		p4d = p4d_offset(pgd, addr);
+		if (p4d_none_or_clear_bad(p4d))
+			return 0;
+		pud = pud_offset(p4d, addr);
+		if (pud_none_or_clear_bad(pud))
+			return 0;
+		pmd = pmd_offset(pud, addr);
+	}
+
+	/* We will check the type of pmd entry later. */
+
+	ret = __break_cow_pte(vma, pmd, addr);
+
+	if (unlikely(ret == -ENOMEM)) {
+		unsigned int cow_pte_alloc_sleep_millisecs = 60000;
+
+		schedule_timeout(msecs_to_jiffies(
+					cow_pte_alloc_sleep_millisecs));
+
+		ret = __break_cow_pte(vma, pmd, addr);
+		if (unlikely(ret == -ENOMEM))  {
+			struct oom_control oc = {
+				.gfp_mask = GFP_PGTABLE_USER,
+			};
+
+			mutex_lock(&oom_lock);
+			out_of_memory(&oc);
+			mutex_unlock(&oom_lock);
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * break_cow_pte_range - duplicate/reuse COW-ed PTE tables in a given range
+ * @vma: target vma for which to break COW
+ * @start: start address of the range
+ * @end: end address of the range
+ *
+ * Return: zero on success, the number of failed PMD ranges otherwise.
+ */
+int break_cow_pte_range(struct vm_area_struct *vma, unsigned long start,
+			unsigned long end)
+{
+	unsigned long addr, next;
+	int nr_failed = 0;
+
+	if (!range_in_vma(vma, start, end))
+		return -EINVAL;
+
+	addr = start;
+	do {
+		next = pmd_addr_end(addr, end);
+		if (break_cow_pte(vma, NULL, addr))
+			nr_failed++;
+	} while (addr = next, addr != end);
+
+	return nr_failed;
+}
+#endif /* CONFIG_COW_PTE */
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
@@ -5267,8 +5555,13 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 			return do_fault(vmf);
 	}
 
-	if (!pte_present(vmf->orig_pte))
+	if (!pte_present(vmf->orig_pte)) {
+#ifdef CONFIG_COW_PTE
+		if (test_bit(MMF_COW_PTE, &vmf->vma->vm_mm->flags))
+			handle_cow_pte_fault(vmf);
+#endif
 		return do_swap_page(vmf);
+	}
 
 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
 		return do_numa_page(vmf);
@@ -5404,8 +5697,31 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 				return 0;
 			}
 		}
+#ifdef CONFIG_COW_PTE
+		/*
+		 * Duplicate the COW-ed PTE table when the page fault will
+		 * change the mapped pages (write or unshare fault) or the
+		 * COW-ed table (file-mapped read fault, see do_read_fault()).
+		 */
+		if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE) ||
+		      vma->vm_ops) && test_bit(MMF_COW_PTE, &mm->flags)) {
+			ret = handle_cow_pte_fault(&vmf);
+			if (unlikely(ret == -ENOMEM))
+				return VM_FAULT_OOM;
+		}
+#endif
 	}
 
+#ifdef CONFIG_COW_PTE
+	/*
+	 * It will definitely break the kernel if the refcount of a PTE
+	 * table is higher than 1 while its PMD entry is writable. But we
+	 * want to see more information, so just warn here.
+	 */
+	if (likely(!pmd_none(*vmf.pmd)))
+		VM_WARN_ON(cow_pte_count(vmf.pmd) > 1 && pmd_write(*vmf.pmd));
+#endif
+
 	return handle_pte_fault(&vmf);
 }
 
diff --git a/mm/mmap.c b/mm/mmap.c
index ff68a67a2a7c..ac1002e85d88 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2169,6 +2169,10 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 			return err;
 	}
 
+	err = break_cow_pte(vma, NULL, addr);
+	if (err)
+		return err;
+
 	new = vm_area_dup(vma);
 	if (!new)
 		return -ENOMEM;
diff --git a/mm/mremap.c b/mm/mremap.c
index 411a85682b58..0668e9ead65a 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -534,6 +534,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
 		if (!old_pmd)
 			continue;
+		/* Is the TLB flushed twice here? */
+		break_cow_pte(vma, old_pmd, old_addr);
 		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
 		if (!new_pmd)
 			break;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2c718f45745f..b7aa880957fd 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1919,6 +1919,8 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			continue;
+		if (break_cow_pte(vma, pmd, addr))
+			return -ENOMEM;
 		ret = unuse_pte_range(vma, pmd, addr, next, type);
 		if (ret)
 			return ret;
-- 
2.34.1


Thread overview: 18+ messages
2023-04-14 14:23 [PATCH v5 00/17] Introduce Copy-On-Write to Page Table Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 01/17] mm: Split out the present cases from zap_pte_range() Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 02/17] mm: Allow user to control COW PTE via prctl Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 03/17] mm: Add Copy-On-Write PTE to fork() Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 04/17] mm: Add break COW PTE fault and helper functions Chih-En Lin [this message]
2023-04-14 14:23 ` [PATCH v5 05/17] mm: Handle COW-ed PTE during zapping Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 06/17] mm/rmap: Break COW PTE in rmap walking Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 07/17] mm/khugepaged: Break COW PTE before scanning pte Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 08/17] mm/ksm: Break COW PTE before modify shared PTE Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 09/17] mm/madvise: Handle COW-ed PTE with madvise() Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 10/17] mm/gup: Trigger break COW PTE before calling follow_pfn_pte() Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 11/17] mm/mprotect: Break COW PTE before changing protection Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 12/17] mm/userfaultfd: Support COW PTE Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 13/17] mm/migrate_device: " Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 14/17] fs/proc: Support COW PTE with clear_refs_write Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 15/17] events/uprobes: Break COW PTE before replacing page Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 16/17] mm: fork: Enable COW PTE to fork system call Chih-En Lin
2023-04-14 14:23 ` [PATCH v5 17/17] mm: Check the unexpected modification of COW-ed PTE Chih-En Lin
