From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
To: maple-tree@lists.infradead.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org
Cc: Andrew Morton <akpm@google.com>, Song Liu <songliubraving@fb.com>,
	Davidlohr Bueso <dave@stgolabs.net>,
	"Paul E . McKenney" <paulmck@kernel.org>,
	Matthew Wilcox <willy@infradead.org>,
	Jerome Glisse <jglisse@redhat.com>,
	David Rientjes <rientjes@google.com>,
	Axel Rasmussen <axelrasmussen@google.com>,
	Suren Baghdasaryan <surenb@google.com>,
	Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH 21/28] mm/mmap: Change __do_munmap() to use a ma_state
Date: Thu, 10 Dec 2020 12:03:55 -0500
Message-ID: <20201210170402.3468568-22-Liam.Howlett@Oracle.com>
In-Reply-To: <20201210170402.3468568-1-Liam.Howlett@Oracle.com>

Use a maple tree state (ma_state) to find and remove the VMAs covered by
the munmap range instead of detaching them with
detach_vmas_to_be_unmapped(), which is removed.  unlock_range() now also
counts the VMAs it walks and returns the tail of the affected range, so
__do_munmap() can drop the range from the tree with a single store,
unlink the VMAs from the mm's linked list, and adjust map_count directly.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 mm/mmap.c | 99 ++++++++++++++++++++++++++-----------------------------
 1 file changed, 47 insertions(+), 52 deletions(-)
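
For reviewers unfamiliar with the maple tree API, the pattern this patch
relies on is roughly the sketch below.  It is a minimal illustration in
kernel context (not a standalone program), using only calls that already
appear in the diff; the helper name is made up for illustration, and
splitting, locking and error handling are omitted.

#include <linux/mm.h>
#include <linux/maple_tree.h>

/* Hypothetical helper: erase every VMA entry covering [start, end). */
static void mt_erase_range_sketch(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	MA_STATE(mas, &mm->mm_mt, start, start);	/* tree walk state */
	struct vm_area_struct *vma;

	/* Find the first VMA overlapping [start, end). */
	vma = mas_find(&mas, end - 1);
	if (!vma)
		return;

	/* Set the range the store will operate on and restart the walk. */
	mas.index = start;
	mas.last = end - 1;
	mas_reset(&mas);

	/* Overwrite every entry in [start, end - 1] with NULL. */
	mas_store_gfp(&mas, NULL, GFP_KERNEL);
}

__do_munmap() follows the same flow, reusing one ma_state across the
split and unlock steps instead of re-walking the list in
detach_vmas_to_be_unmapped().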

diff --git a/mm/mmap.c b/mm/mmap.c
index 15616f105d051..34f337a5fc31d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2562,44 +2562,6 @@ static void unmap_region(struct mm_struct *mm,
 	tlb_finish_mmu(&tlb, start, end);
 }
 
-/*
- * Create a list of vma's touched by the unmap, removing them from the mm's
- * vma list as we go..
- */
-static bool
-detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
-	struct vm_area_struct *prev, unsigned long end)
-{
-	struct vm_area_struct **insertion_point;
-	struct vm_area_struct *tail_vma = NULL;
-
-	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
-	vma->vm_prev = NULL;
-	vma_mt_szero(mm, vma->vm_start, end);
-	do {
-		mm->map_count--;
-		tail_vma = vma;
-		vma = vma->vm_next;
-	} while (vma && vma->vm_start < end);
-	*insertion_point = vma;
-	if (vma)
-		vma->vm_prev = prev;
-	else
-		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
-	tail_vma->vm_next = NULL;
-
-	/*
-	 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
-	 * VM_GROWSUP VMA. Such VMAs can change their size under
-	 * down_read(mmap_lock) and collide with the VMA we are about to unmap.
-	 */
-	if (vma && (vma->vm_flags & VM_GROWSDOWN))
-		return false;
-	if (prev && (prev->vm_flags & VM_GROWSUP))
-		return false;
-	return true;
-}
-
 /*
  * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
  * has already been checked or doesn't make sense to fail.
@@ -2679,12 +2641,16 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	return __split_vma(mm, vma, addr, new_below);
 }
 
-static inline void unlock_range(struct vm_area_struct *start, unsigned long limit)
+static inline int unlock_range(struct vm_area_struct *start,
+			       struct vm_area_struct **tail, unsigned long limit)
 {
 	struct mm_struct *mm = start->vm_mm;
 	struct vm_area_struct *tmp = start;
+	int count = 0;
 
 	while (tmp && tmp->vm_start < limit) {
+		*tail = tmp;
+		count++;
 		if (tmp->vm_flags & VM_LOCKED) {
 			mm->locked_vm -= vma_pages(tmp);
 			munlock_vma_pages_all(tmp);
@@ -2692,6 +2658,8 @@ static inline void unlock_range(struct vm_area_struct *start, unsigned long limi
 
 		tmp = tmp->vm_next;
 	}
+
+	return count;
 }
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
@@ -2703,23 +2671,24 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 {
 	unsigned long end;
 	struct vm_area_struct *vma, *prev, *last;
+	MA_STATE(mas, &mm->mm_mt, start, start);
 
 	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
 		return -EINVAL;
 
-	len = PAGE_ALIGN(len);
-	end = start + len;
-	if (len == 0)
+	end = start + PAGE_ALIGN(len);
+	if (end == start)
 		return -EINVAL;
 
 	 /* arch_unmap() might do unmaps itself.  */
 	arch_unmap(mm, start, end);
 
 	/* Find the first overlapping VMA */
-	vma = find_vma_intersection(mm, start, end);
+	vma = mas_find(&mas, end - 1);
 	if (!vma)
 		return 0;
 
+	mas.last = end - 1;
 	/* we have start < vma->vm_end  */
 
 	/*
@@ -2744,6 +2713,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 			return error;
 		prev = vma;
 		vma = vma_next(mm, prev);
+		mas.index = start;
+		mas_reset(&mas);
 	} else {
 		prev = vma->vm_prev;
 	}
@@ -2759,6 +2730,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 		if (error)
 			return error;
 		vma = vma_next(mm, prev);
+		mas_reset(&mas);
 	}
 
 
@@ -2779,17 +2751,40 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	}
 
 	/*
-	 * unlock any mlock()ed ranges before detaching vmas
+	 * unlock any mlock()ed ranges before detaching vmas, count the number
+	 * of VMAs to be dropped, and return the tail entry of the affected
+	 * area.
 	 */
-	if (mm->locked_vm)
-		unlock_range(vma, end);
+	mm->map_count -= unlock_range(vma, &last, end);
+	/* Drop removed area from the tree */
+	mas_store_gfp(&mas, NULL, GFP_KERNEL);
 
-	/* Detach vmas from the MM linked list and remove from the mm tree*/
-	if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
-		downgrade = false;
+	/* Detach vmas from the MM linked list */
+	vma->vm_prev = NULL;
+	if (prev)
+		prev->vm_next = last->vm_next;
+	else
+		mm->mmap = last->vm_next;
 
-	if (downgrade)
-		mmap_write_downgrade(mm);
+	if (last->vm_next) {
+		last->vm_next->vm_prev = prev;
+		last->vm_next = NULL;
+	} else
+		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
+
+	/*
+	 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
+	 * VM_GROWSUP VMA. Such VMAs can change their size under
+	 * down_read(mmap_lock) and collide with the VMA we are about to unmap.
+	 */
+	if (downgrade) {
+		if (last && (last->vm_flags & VM_GROWSDOWN))
+			downgrade = false;
+		else if (prev && (prev->vm_flags & VM_GROWSUP))
+			downgrade = false;
+		else
+			mmap_write_downgrade(mm);
+	}
 
 	unmap_region(mm, vma, prev, start, end);
 
@@ -3212,7 +3207,7 @@ void exit_mmap(struct mm_struct *mm)
 	}
 
 	if (mm->locked_vm)
-		unlock_range(mm->mmap, ULONG_MAX);
+		unlock_range(mm->mmap, &vma, ULONG_MAX);
 
 	arch_exit_mmap(mm);
 
-- 
2.28.0


