From: Liam Howlett <liam.howlett@oracle.com>
To: "linux-mm@kvack.org" <linux-mm@kvack.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	"maple-tree@lists.infradead.org" <maple-tree@lists.infradead.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Subject: [PATCH v3 43/48] mm: Don't use __vma_adjust() in __split_vma()
Date: Tue, 17 Jan 2023 02:34:23 +0000
Message-ID: <20230117023335.1690727-44-Liam.Howlett@oracle.com>
In-Reply-To: <20230117023335.1690727-1-Liam.Howlett@oracle.com>

From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Use the abstracted locking (vma_prepare()/vma_complete()) and the maple
tree operations directly in __split_vma().  Since __split_vma() was the
only caller of __vma_adjust() that passed an insert vma, drop that
argument.  Remove the NULL insert argument passed through from fs/exec's
shift_arg_pages() at the same time.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 fs/exec.c          |   4 +-
 include/linux/mm.h |   7 ++-
 mm/mmap.c          | 118 +++++++++++++++++++++------------------------
 3 files changed, 60 insertions(+), 69 deletions(-)
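
For review context, a condensed sketch of the new __split_vma() core after
this change (this is just the hunk below with the error paths trimmed, not
additional code; init_vma_prep(), vma_prepare() and vma_complete() are the
helpers introduced earlier in this series):

	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
	init_vma_prep(&vp, vma);
	vp.insert = new;
	vma_prepare(&vp);

	if (new_below) {
		vma->vm_start = addr;
		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
	} else {
		vma->vm_end = addr;
	}

	/* vma_complete() stores the new (inserted) vma */
	vma_complete(&vp, vmi, vma->vm_mm);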

diff --git a/fs/exec.c b/fs/exec.c
index 76ee62e1d3f1..d52fca2dd30b 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -699,7 +699,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	/*
 	 * cover the whole range: [new_start, old_end)
 	 */
-	if (vma_adjust(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
+	if (vma_adjust(&vmi, vma, new_start, old_end, vma->vm_pgoff))
 		return -ENOMEM;
 
 	/*
@@ -733,7 +733,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 
 	vma_prev(&vmi);
 	/* Shrink the vma to just the new range */
-	return vma_adjust(&vmi, vma, new_start, new_end, vma->vm_pgoff, NULL);
+	return vma_adjust(&vmi, vma, new_start, new_end, vma->vm_pgoff);
 }
 
 /*
diff --git a/include/linux/mm.h b/include/linux/mm.h
index aabfd4183091..a00871cc63cc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2823,13 +2823,12 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
 extern int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start,
-	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
-	struct vm_area_struct *expand);
+	unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand);
 static inline int vma_adjust(struct vma_iterator *vmi,
 	struct vm_area_struct *vma, unsigned long start, unsigned long end,
-	pgoff_t pgoff, struct vm_area_struct *insert)
+	pgoff_t pgoff)
 {
-	return __vma_adjust(vmi, vma, start, end, pgoff, insert, NULL);
+	return __vma_adjust(vmi, vma, start, end, pgoff, NULL);
 }
 extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
 	struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
diff --git a/mm/mmap.c b/mm/mmap.c
index b28f3416b60a..a8c941e3be84 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -695,7 +695,7 @@ inline int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
  */
 int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	unsigned long start, unsigned long end, pgoff_t pgoff,
-	struct vm_area_struct *insert, struct vm_area_struct *expand)
+	struct vm_area_struct *expand)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *remove2 = NULL;
@@ -708,7 +708,7 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	struct vm_area_struct *exporter = NULL, *importer = NULL;
 	struct vma_prepare vma_prep;
 
-	if (next && !insert) {
+	if (next) {
 		if (end >= next->vm_end) {
 			/*
 			 * vma expands, overlapping all the next, and
@@ -799,39 +799,25 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	VM_WARN_ON(vma_prep.anon_vma && adjust_next && next->anon_vma &&
 		   vma_prep.anon_vma != next->anon_vma);
 
-	vma_prep.insert = insert;
 	vma_prepare(&vma_prep);
 
-	if (start != vma->vm_start) {
-		if (vma->vm_start < start) {
-			if (!insert || (insert->vm_end != start)) {
-				vma_iter_clear(vmi, vma->vm_start, start);
-				vma_iter_set(vmi, start);
-				VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
-			}
-		} else {
-			vma_changed = true;
-		}
-		vma->vm_start = start;
-	}
-	if (end != vma->vm_end) {
-		if (vma->vm_end > end) {
-			if (!insert || (insert->vm_start != end)) {
-				vma_iter_clear(vmi, end, vma->vm_end);
-				vma_iter_set(vmi, vma->vm_end);
-				VM_WARN_ON(insert &&
-					   insert->vm_end < vma->vm_end);
-			}
-		} else {
-			vma_changed = true;
-		}
-		vma->vm_end = end;
-	}
+	if (vma->vm_start < start)
+		vma_iter_clear(vmi, vma->vm_start, start);
+	else if (start != vma->vm_start)
+		vma_changed = true;
+
+	if (vma->vm_end > end)
+		vma_iter_clear(vmi, end, vma->vm_end);
+	else if (end != vma->vm_end)
+		vma_changed = true;
+
+	vma->vm_start = start;
+	vma->vm_end = end;
+	vma->vm_pgoff = pgoff;
 
 	if (vma_changed)
 		vma_iter_store(vmi, vma);
 
-	vma->vm_pgoff = pgoff;
 	if (adjust_next) {
 		next->vm_start += adjust_next;
 		next->vm_pgoff += adjust_next >> PAGE_SHIFT;
@@ -850,9 +836,9 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
  * per-vma resources, so we don't attempt to merge those.
  */
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
-				struct file *file, unsigned long vm_flags,
-				struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
-				struct anon_vma_name *anon_name)
+				   struct file *file, unsigned long vm_flags,
+				   struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+				   struct anon_vma_name *anon_name)
 {
 	/*
 	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -1034,20 +1020,19 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 			is_mergeable_anon_vma(prev->anon_vma,
 				next->anon_vma, NULL)) {	 /* cases 1, 6 */
 		err = __vma_adjust(vmi, prev, prev->vm_start,
-					next->vm_end, prev->vm_pgoff, NULL,
-					prev);
+					next->vm_end, prev->vm_pgoff, prev);
 		res = prev;
 	} else if (merge_prev) {			/* cases 2, 5, 7 */
 		err = __vma_adjust(vmi, prev, prev->vm_start,
-					end, prev->vm_pgoff, NULL, prev);
+					end, prev->vm_pgoff, prev);
 		res = prev;
 	} else if (merge_next) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
 			err = __vma_adjust(vmi, prev, prev->vm_start,
-					addr, prev->vm_pgoff, NULL, next);
+					addr, prev->vm_pgoff, next);
 		else					/* cases 3, 8 */
 			err = __vma_adjust(vmi, mid, addr, next->vm_end,
-					next->vm_pgoff - pglen, NULL, next);
+					next->vm_pgoff - pglen, next);
 		res = next;
 	}
 
@@ -2187,11 +2172,15 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
 int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		unsigned long addr, int new_below)
 {
+	struct vma_prepare vp;
 	struct vm_area_struct *new;
 	int err;
 
 	validate_mm_mt(vma->vm_mm);
 
+	WARN_ON(vma->vm_start >= addr);
+	WARN_ON(vma->vm_end <= addr);
+
 	if (vma->vm_ops && vma->vm_ops->may_split) {
 		err = vma->vm_ops->may_split(vma, addr);
 		if (err)
@@ -2202,16 +2191,20 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	if (!new)
 		return -ENOMEM;
 
-	if (new_below)
+	err = -ENOMEM;
+	if (vma_iter_prealloc(vmi, vma))
+		goto out_free_vma;
+
+	if (new_below) {
 		new->vm_end = addr;
-	else {
+	} else {
 		new->vm_start = addr;
 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
 	}
 
 	err = vma_dup_policy(vma, new);
 	if (err)
-		goto out_free_vma;
+		goto out_free_vmi;
 
 	err = anon_vma_clone(new, vma);
 	if (err)
@@ -2223,33 +2216,32 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	if (new->vm_ops && new->vm_ops->open)
 		new->vm_ops->open(new);
 
-	if (new_below)
-		err = vma_adjust(vmi, vma, addr, vma->vm_end,
-			vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT),
-			new);
-	else
-		err = vma_adjust(vmi, vma, vma->vm_start, addr, vma->vm_pgoff,
-				 new);
+	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
+	init_vma_prep(&vp, vma);
+	vp.insert = new;
+	vma_prepare(&vp);
 
-	/* Success. */
-	if (!err) {
-		if (new_below)
-			vma_next(vmi);
-		return 0;
+	if (new_below) {
+		vma->vm_start = addr;
+		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
+	} else {
+		vma->vm_end = addr;
 	}
 
-	/* Avoid vm accounting in close() operation */
-	new->vm_start = new->vm_end;
-	new->vm_pgoff = 0;
-	/* Clean everything up if vma_adjust failed. */
-	if (new->vm_ops && new->vm_ops->close)
-		new->vm_ops->close(new);
-	if (new->vm_file)
-		fput(new->vm_file);
-	unlink_anon_vmas(new);
- out_free_mpol:
+	/* vma_complete stores the new vma */
+	vma_complete(&vp, vmi, vma->vm_mm);
+
+	/* Success. */
+	if (new_below)
+		vma_next(vmi);
+	validate_mm_mt(vma->vm_mm);
+	return 0;
+
+out_free_mpol:
 	mpol_put(vma_policy(new));
- out_free_vma:
+out_free_vmi:
+	vma_iter_free(vmi);
+out_free_vma:
 	vm_area_free(new);
 	validate_mm_mt(vma->vm_mm);
 	return err;
-- 
2.35.1

Thread overview: 55+ messages
2023-01-17  2:34 [PATCH v3 00/48] VMA tree type safety and remove __vma_adjust() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 01/48] maple_tree: Add mas_init() function Liam Howlett
2023-01-17  2:34 ` [PATCH v3 02/48] maple_tree: Fix potential rcu issue Liam Howlett
2023-01-17  2:34 ` [PATCH v3 04/48] test_maple_tree: Test modifications while iterating Liam Howlett
2023-01-17  2:34 ` [PATCH v3 03/48] maple_tree: Reduce user error potential Liam Howlett
2023-01-17  2:34 ` [PATCH v3 05/48] maple_tree: Fix handle of invalidated state in mas_wr_store_setup() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 06/48] maple_tree: Fix mas_prev() and mas_find() state handling Liam Howlett
2023-01-17  2:34 ` [PATCH v3 07/48] mm: Expand vma iterator interface Liam Howlett
2023-01-17  2:34 ` [PATCH v3 08/48] mm/mmap: convert brk to use vma iterator Liam Howlett
2023-01-17  2:34 ` [PATCH v3 09/48] kernel/fork: Convert forking to using the vmi iterator Liam Howlett
2023-01-17  2:34 ` [PATCH v3 10/48] mmap: Convert vma_link() vma iterator Liam Howlett
2023-01-17  2:34 ` [PATCH v3 12/48] mmap: Change do_mas_munmap and do_mas_aligned_munmap() to use " Liam Howlett
2023-01-17  2:34 ` [PATCH v3 11/48] mm/mmap: Remove preallocation from do_mas_align_munmap() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 14/48] mm: Add temporary vma iterator versions of vma_merge(), split_vma(), and __split_vma() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 15/48] ipc/shm: Use the vma iterator for munmap calls Liam Howlett
2023-01-17  2:34 ` [PATCH v3 13/48] mmap: Convert vma_expand() to use vma iterator Liam Howlett
2023-01-17  2:34 ` [PATCH v3 18/48] mlock: Convert mlock to " Liam Howlett
2023-01-17  2:34 ` [PATCH v3 17/48] mm: Change mprotect_fixup " Liam Howlett
2023-01-17  2:34 ` [PATCH v3 16/48] userfaultfd: Use " Liam Howlett
2023-01-17  2:34 ` [PATCH v3 21/48] task_mmu: Convert to " Liam Howlett
2023-01-17  2:34 ` [PATCH v3 19/48] coredump: " Liam Howlett
2023-01-17  2:34 ` [PATCH v3 20/48] mempolicy: " Liam Howlett
2023-01-17  2:34 ` [PATCH v3 24/48] mmap: Pass through vmi iterator to __split_vma() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 22/48] sched: Convert to vma iterator Liam Howlett
2023-01-17  2:34 ` [PATCH v3 23/48] madvise: Use vmi iterator for __split_vma() and vma_merge() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 25/48] mmap: Use vmi version of vma_merge() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 28/48] nommu: Pass through vma iterator to shrink_vma() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 26/48] mm/mremap: Use vmi version of vma_merge() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 27/48] nommu: Convert nommu to using the vma iterator Liam Howlett
2023-01-17  2:34 ` [PATCH v3 29/48] mm: Switch vma_merge(), split_vma(), and __split_vma to " Liam Howlett
2023-01-17  2:34 ` [PATCH v3 30/48] mm/damon: Stop using vma_mas_store() for maple tree store Liam Howlett
2023-01-17 19:11   ` SeongJae Park
2023-01-17 19:16     ` SeongJae Park
2023-01-17 22:20     ` Daniel Latypov
2023-01-17 22:47       ` Liam Howlett
2023-01-19  2:00         ` SeongJae Park
2023-01-19 18:55           ` Liam R. Howlett
2023-01-17  2:34 ` [PATCH v3 31/48] mmap: Convert __vma_adjust() to use vma iterator Liam Howlett
2023-01-17  2:34 ` [PATCH v3 35/48] mm: Pass vma iterator through to __vma_adjust() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 32/48] mm: Pass through vma iterator " Liam Howlett
2023-01-17  2:34 ` [PATCH v3 34/48] mm: Remove unnecessary write to vma iterator in __vma_adjust() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 33/48] madvise: Use split_vma() instead of __split_vma() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 37/48] mmap: Clean up mmap_region() unrolling Liam Howlett
2023-01-17  2:34 ` [PATCH v3 36/48] mm: Add vma iterator to vma_adjust() arguments Liam Howlett
2023-01-17  2:34 ` [PATCH v3 38/48] mm: Change munmap splitting order and move_vma() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 41/48] mm/mmap: Use vma_prepare() and vma_complete() in vma_expand() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 42/48] mm/mmap: Introduce init_vma_prep() and init_multi_vma_prep() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 40/48] mm/mmap: Refactor locking out of __vma_adjust() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 39/48] mm/mmap: move anon_vma setting in __vma_adjust() Liam Howlett
2023-01-17  2:34 ` Liam Howlett [this message]
2023-01-17  2:34 ` [PATCH v3 45/48] mm/mmap: Introduce dup_vma_anon() helper Liam Howlett
2023-01-17  2:34 ` [PATCH v3 44/48] mm/mmap: Don't use __vma_adjust() in shift_arg_pages() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 48/48] vma_merge: Set vma iterator to correct position Liam Howlett
2023-01-17  2:34 ` [PATCH v3 47/48] mm/mmap: Remove __vma_adjust() Liam Howlett
2023-01-17  2:34 ` [PATCH v3 46/48] mm/mmap: Convert do_brk_flags() to use vma_prepare() and vma_complete() Liam Howlett
