From: Andrew Morton <akpm@linux-foundation.org>
To: jgg@ziepe.ca, kirill@shutemov.name, mm-commits@vger.kernel.org,
	peterx@redhat.com
Subject: [folded-merged] mm-remove-src-dst-mm-parameter-in-copy_page_range-v2.patch removed from -mm tree
Date: Tue, 13 Oct 2020 16:10:41 -0700	[thread overview]
Message-ID: <20201013231041.Z81ck4Ba1%akpm@linux-foundation.org> (raw)
In-Reply-To: <20201010231559.e148a66f744d0b4870301450@linux-foundation.org>


The patch titled
     Subject: mm-remove-src-dst-mm-parameter-in-copy_page_range-v2
has been removed from the -mm tree.  Its filename was
     mm-remove-src-dst-mm-parameter-in-copy_page_range-v2.patch

This patch was dropped because it was folded into mm-remove-src-dst-mm-parameter-in-copy_page_range.patch

------------------------------------------------------
From: Peter Xu <peterx@redhat.com>
Subject: mm-remove-src-dst-mm-parameter-in-copy_page_range-v2

Further reorder some parameters and adjust the line formatting, per Jason's review.

Link: https://lkml.kernel.org/r/20201002192647.7161-1-peterx@redhat.com
Reported-by: Kirill A. Shutemov <kirill@shutemov.name>
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 include/linux/mm.h |    3 
 kernel/fork.c      |    2 
 mm/memory.c        |  140 ++++++++++++++++++++++---------------------
 3 files changed, 76 insertions(+), 69 deletions(-)
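
For readers skimming past the diff: the net effect is that copy_page_range() and
its helpers now take (dst_vma, src_vma) instead of threading dst_mm/src_mm through
every level, and each function derives the mm pointers locally from the VMAs. The
following is a minimal, self-contained userspace sketch of that calling convention
only; the struct and field names are simplified stand-ins for the real kernel types
(struct vm_area_struct, struct mm_struct), and the full diff follows below.

/*
 * Minimal userspace sketch of the calling convention this patch settles
 * on.  Simplified stand-in types; only the parameter ordering and the
 * derive-the-mm-locally pattern are taken from the actual diff.
 */
#include <stdio.h>

struct mm {
	const char *name;
};

struct vma {
	struct mm *vm_mm;		/* each VMA points back at its mm */
	unsigned long vm_start, vm_end;
};

/* Destination VMA first, source second -- matching the patched order. */
static int copy_page_range(struct vma *dst_vma, struct vma *src_vma)
{
	/* mm pointers are derived locally instead of being passed in */
	struct mm *dst_mm = dst_vma->vm_mm;
	struct mm *src_mm = src_vma->vm_mm;

	printf("copy %s [%#lx-%#lx] -> %s\n",
	       src_mm->name, src_vma->vm_start, src_vma->vm_end,
	       dst_mm->name);
	return 0;
}

int main(void)
{
	struct mm parent = { "parent" }, child = { "child" };
	struct vma mpnt = { &parent, 0x1000, 0x2000 };	/* source (parent) */
	struct vma tmp  = { &child,  0x1000, 0x2000 };	/* dest (child) */

	/* As in dup_mmap() after the reorder: copy_page_range(tmp, mpnt) */
	return copy_page_range(&tmp, &mpnt);
}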

--- a/include/linux/mm.h~mm-remove-src-dst-mm-parameter-in-copy_page_range-v2
+++ a/include/linux/mm.h
@@ -1645,7 +1645,8 @@ struct mmu_notifier_range;
 
 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 		unsigned long end, unsigned long floor, unsigned long ceiling);
-int copy_page_range(struct vm_area_struct *vma, struct vm_area_struct *new);
+int
+copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 		   struct mmu_notifier_range *range,
 		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
--- a/kernel/fork.c~mm-remove-src-dst-mm-parameter-in-copy_page_range-v2
+++ a/kernel/fork.c
@@ -589,7 +589,7 @@ static __latent_entropy int dup_mmap(str
 
 		mm->map_count++;
 		if (!(tmp->vm_flags & VM_WIPEONFORK))
-			retval = copy_page_range(mpnt, tmp);
+			retval = copy_page_range(tmp, mpnt);
 
 		if (tmp->vm_ops && tmp->vm_ops->open)
 			tmp->vm_ops->open(tmp);
--- a/mm/memory.c~mm-remove-src-dst-mm-parameter-in-copy_page_range-v2
+++ a/mm/memory.c
@@ -794,15 +794,15 @@ copy_nonpresent_pte(struct mm_struct *ds
  * lock.
  */
 static inline int
-copy_present_page(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-		pte_t *dst_pte, pte_t *src_pte,
-		struct vm_area_struct *vma, struct vm_area_struct *new,
-		unsigned long addr, int *rss, struct page **prealloc,
-		pte_t pte, struct page *page)
+copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
+		  struct page **prealloc, pte_t pte, struct page *page)
 {
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
+	struct mm_struct *src_mm = src_vma->vm_mm;
 	struct page *new_page;
 
-	if (!is_cow_mapping(vma->vm_flags))
+	if (!is_cow_mapping(src_vma->vm_flags))
 		return 1;
 
 	/*
@@ -865,15 +865,15 @@ copy_present_page(struct mm_struct *dst_
 	 * over and copy the page & arm it.
 	 */
 	*prealloc = NULL;
-	copy_user_highpage(new_page, page, addr, vma);
+	copy_user_highpage(new_page, page, addr, src_vma);
 	__SetPageUptodate(new_page);
-	page_add_new_anon_rmap(new_page, new, addr, false);
-	lru_cache_add_inactive_or_unevictable(new_page, new);
+	page_add_new_anon_rmap(new_page, dst_vma, addr, false);
+	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
 	rss[mm_counter(new_page)]++;
 
 	/* All done, just insert the new page copy in the child */
-	pte = mk_pte(new_page, new->vm_page_prot);
-	pte = maybe_mkwrite(pte_mkdirty(pte), new);
+	pte = mk_pte(new_page, dst_vma->vm_page_prot);
+	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
 	set_pte_at(dst_mm, addr, dst_pte, pte);
 	return 0;
 }
@@ -883,24 +883,22 @@ copy_present_page(struct mm_struct *dst_
  * is required to copy this pte.
  */
 static inline int
-copy_present_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
-		struct vm_area_struct *new,
-		unsigned long addr, int *rss, struct page **prealloc)
+copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
+		 struct page **prealloc)
 {
-	unsigned long vm_flags = vma->vm_flags;
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
+	struct mm_struct *src_mm = src_vma->vm_mm;
+	unsigned long vm_flags = src_vma->vm_flags;
 	pte_t pte = *src_pte;
 	struct page *page;
 
-	page = vm_normal_page(vma, addr, pte);
+	page = vm_normal_page(src_vma, addr, pte);
 	if (page) {
 		int retval;
 
-		retval = copy_present_page(dst_mm, src_mm,
-			dst_pte, src_pte,
-			vma, new,
-			addr, rss, prealloc,
-			pte, page);
+		retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
+					   addr, rss, prealloc, pte, page);
 		if (retval <= 0)
 			return retval;
 
@@ -957,12 +955,13 @@ page_copy_prealloc(struct mm_struct *src
 	return new_page;
 }
 
-static int copy_pte_range(pmd_t *dst_pmd, pmd_t *src_pmd,
-		   struct vm_area_struct *vma, struct vm_area_struct *new,
-		   unsigned long addr, unsigned long end)
+static int
+copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+	       unsigned long end)
 {
-	struct mm_struct *dst_mm = new->vm_mm;
-	struct mm_struct *src_mm = vma->vm_mm;
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
+	struct mm_struct *src_mm = src_vma->vm_mm;
 	pte_t *orig_src_pte, *orig_dst_pte;
 	pte_t *src_pte, *dst_pte;
 	spinlock_t *src_ptl, *dst_ptl;
@@ -1005,15 +1004,15 @@ again:
 		if (unlikely(!pte_present(*src_pte))) {
 			entry.val = copy_nonpresent_pte(dst_mm, src_mm,
 							dst_pte, src_pte,
-							vma, addr, rss);
+							src_vma, addr, rss);
 			if (entry.val)
 				break;
 			progress += 8;
 			continue;
 		}
 		/* copy_present_pte() will clear `*prealloc' if consumed */
-		ret = copy_present_pte(dst_mm, src_mm, dst_pte, src_pte,
-				       vma, new, addr, rss, &prealloc);
+		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
+				       addr, rss, &prealloc);
 		/*
 		 * If we need a pre-allocated page for this pte, drop the
 		 * locks, allocate, and try again.
@@ -1048,7 +1047,7 @@ again:
 		entry.val = 0;
 	} else if (ret) {
 		WARN_ON_ONCE(ret != -EAGAIN);
-		prealloc = page_copy_prealloc(src_mm, vma, addr);
+		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
 		if (!prealloc)
 			return -ENOMEM;
 		/* We've captured and resolved the error. Reset, try again. */
@@ -1062,12 +1061,13 @@ out:
 	return ret;
 }
 
-static inline int copy_pmd_range(pud_t *dst_pud, pud_t *src_pud,
-		struct vm_area_struct *vma, struct vm_area_struct *new,
-		unsigned long addr, unsigned long end)
+static inline int
+copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+	       unsigned long end)
 {
-	struct mm_struct *dst_mm = new->vm_mm;
-	struct mm_struct *src_mm = vma->vm_mm;
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
+	struct mm_struct *src_mm = src_vma->vm_mm;
 	pmd_t *src_pmd, *dst_pmd;
 	unsigned long next;
 
@@ -1080,9 +1080,9 @@ static inline int copy_pmd_range(pud_t *
 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
 			|| pmd_devmap(*src_pmd)) {
 			int err;
-			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
+			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
 			err = copy_huge_pmd(dst_mm, src_mm,
-					    dst_pmd, src_pmd, addr, vma);
+					    dst_pmd, src_pmd, addr, src_vma);
 			if (err == -ENOMEM)
 				return -ENOMEM;
 			if (!err)
@@ -1091,18 +1091,20 @@ static inline int copy_pmd_range(pud_t *
 		}
 		if (pmd_none_or_clear_bad(src_pmd))
 			continue;
-		if (copy_pte_range(dst_pmd, src_pmd, vma, new, addr, next))
+		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
+				   addr, next))
 			return -ENOMEM;
 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
 	return 0;
 }
 
-static inline int copy_pud_range(p4d_t *dst_p4d, p4d_t *src_p4d,
-		struct vm_area_struct *vma, struct vm_area_struct *new,
-		unsigned long addr, unsigned long end)
+static inline int
+copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
+	       unsigned long end)
 {
-	struct mm_struct *dst_mm = new->vm_mm;
-	struct mm_struct *src_mm = vma->vm_mm;
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
+	struct mm_struct *src_mm = src_vma->vm_mm;
 	pud_t *src_pud, *dst_pud;
 	unsigned long next;
 
@@ -1115,9 +1117,9 @@ static inline int copy_pud_range(p4d_t *
 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
 			int err;
 
-			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
+			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
 			err = copy_huge_pud(dst_mm, src_mm,
-					    dst_pud, src_pud, addr, vma);
+					    dst_pud, src_pud, addr, src_vma);
 			if (err == -ENOMEM)
 				return -ENOMEM;
 			if (!err)
@@ -1126,17 +1128,19 @@ static inline int copy_pud_range(p4d_t *
 		}
 		if (pud_none_or_clear_bad(src_pud))
 			continue;
-		if (copy_pmd_range(dst_pud, src_pud, vma, new, addr, next))
+		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
+				   addr, next))
 			return -ENOMEM;
 	} while (dst_pud++, src_pud++, addr = next, addr != end);
 	return 0;
 }
 
-static inline int copy_p4d_range(pgd_t *dst_pgd, pgd_t *src_pgd,
-		struct vm_area_struct *vma, struct vm_area_struct *new,
-		unsigned long addr, unsigned long end)
+static inline int
+copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
+	       unsigned long end)
 {
-	struct mm_struct *dst_mm = new->vm_mm;
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
 	p4d_t *src_p4d, *dst_p4d;
 	unsigned long next;
 
@@ -1148,20 +1152,22 @@ static inline int copy_p4d_range(pgd_t *
 		next = p4d_addr_end(addr, end);
 		if (p4d_none_or_clear_bad(src_p4d))
 			continue;
-		if (copy_pud_range(dst_p4d, src_p4d, vma, new, addr, next))
+		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
+				   addr, next))
 			return -ENOMEM;
 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
 	return 0;
 }
 
-int copy_page_range(struct vm_area_struct *vma, struct vm_area_struct *new)
+int
+copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
 {
 	pgd_t *src_pgd, *dst_pgd;
 	unsigned long next;
-	unsigned long addr = vma->vm_start;
-	unsigned long end = vma->vm_end;
-	struct mm_struct *dst_mm = new->vm_mm;
-	struct mm_struct *src_mm = vma->vm_mm;
+	unsigned long addr = src_vma->vm_start;
+	unsigned long end = src_vma->vm_end;
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
+	struct mm_struct *src_mm = src_vma->vm_mm;
 	struct mmu_notifier_range range;
 	bool is_cow;
 	int ret;
@@ -1172,19 +1178,19 @@ int copy_page_range(struct vm_area_struc
 	 * readonly mappings. The tradeoff is that copy_page_range is more
 	 * efficient than faulting.
 	 */
-	if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
-			!vma->anon_vma)
+	if (!(src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
+	    !src_vma->anon_vma)
 		return 0;
 
-	if (is_vm_hugetlb_page(vma))
-		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
+	if (is_vm_hugetlb_page(src_vma))
+		return copy_hugetlb_page_range(dst_mm, src_mm, src_vma);
 
-	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
+	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
 		/*
 		 * We do not free on error cases below as remove_vma
 		 * gets called on error from higher level routine
 		 */
-		ret = track_pfn_copy(vma);
+		ret = track_pfn_copy(src_vma);
 		if (ret)
 			return ret;
 	}
@@ -1195,11 +1201,11 @@ int copy_page_range(struct vm_area_struc
 	 * parent mm. And a permission downgrade will only happen if
 	 * is_cow_mapping() returns true.
 	 */
-	is_cow = is_cow_mapping(vma->vm_flags);
+	is_cow = is_cow_mapping(src_vma->vm_flags);
 
 	if (is_cow) {
 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
-					0, vma, src_mm, addr, end);
+					0, src_vma, src_mm, addr, end);
 		mmu_notifier_invalidate_range_start(&range);
 	}
 
@@ -1210,8 +1216,8 @@ int copy_page_range(struct vm_area_struc
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(src_pgd))
 			continue;
-		if (unlikely(copy_p4d_range(dst_pgd, src_pgd,
-					    vma, new, addr, next))) {
+		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
+					    addr, next))) {
 			ret = -ENOMEM;
 			break;
 		}
_

Patches currently in -mm which might be from peterx@redhat.com are

mm-remove-src-dst-mm-parameter-in-copy_page_range.patch
mm-remove-src-dst-mm-parameter-in-copy_page_range-v2-fix.patch

