From: Hugh Dickins <hughd@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>, Vlastimil Babka <vbabka@suse.cz>,
	"Kirill A. Shutemov" <kirill@shutemov.name>,
	Matthew Wilcox <willy@infradead.org>,
	David Hildenbrand <david@redhat.com>,
	Alistair Popple <apopple@nvidia.com>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Rik van Riel <riel@surriel.com>,
	Suren Baghdasaryan <surenb@google.com>,
	Yu Zhao <yuzhao@google.com>, Greg Thelen <gthelen@google.com>,
	Shakeel Butt <shakeelb@google.com>,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH 03/13] mm/munlock: delete munlock_vma_pages_all(), allow oomreap
Date: Sun, 6 Feb 2022 13:34:51 -0800 (PST)
Message-ID: <8dddb3d4-361-da5-538-3f3ae1b326b@google.com>
In-Reply-To: <8e4356d-9622-a7f0-b2c-f116b5f2efea@google.com>

munlock_vma_pages_range() will still be required, when munlocking but
not munmapping a set of pages; but when unmapping a pte, the mlock count
will be maintained in much the same way as it will be maintained when
mapping in the pte.  Which removes the need for munlock_vma_pages_all()
on mlocked vmas when munmapping or exiting: eliminating the catastrophic
contention on i_mmap_rwsem, and the need for page lock on the pages.
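
To sketch the direction (illustration only: the real reimplementation of
munlock_vma_pages_range() and the per-pte hooks only arrive in later
commits of this series, and the exact call site and locking there are not
shown here), the unmap side would do roughly

	/*
	 * Sketch, not part of this patch: when a pte of a VM_LOCKED vma
	 * is unmapped, drop the mlock count that mlock_vma_page() took
	 * when the pte was mapped in.  Placement is illustrative.
	 */
	static void sketch_unmap_one_pte(struct vm_area_struct *vma,
					 struct page *page)
	{
		if (vma->vm_flags & VM_LOCKED)
			munlock_vma_page(page);	/* undo mlock_vma_page() */
	}

so the count stays balanced pte by pte, with no separate
munlock_vma_pages_all() pass (and no taking of all those page locks)
at munmap or exit time.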

There is still a need to update locked_vm accounting according to the
munmapped vmas when munmapping: do that in detach_vmas_to_be_unmapped().
exit_mmap() does not need locked_vm updates, so delete unlock_range().

And wasn't I the one who forbade the OOM reaper to attack mlocked vmas,
because of the uncertainty in blocking on all those page locks?
No fear of that now, so permit the OOM reaper on mlocked vmas.

Signed-off-by: Hugh Dickins <hughd@google.com>
---
 mm/internal.h | 16 ++--------------
 mm/madvise.c  |  5 +++++
 mm/mlock.c    |  4 ++--
 mm/mmap.c     | 32 ++------------------------------
 mm/oom_kill.c |  2 +-
 5 files changed, 12 insertions(+), 47 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index e48c486d5ddf..f235aa92e564 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -71,11 +71,6 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 
-static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
-{
-	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
-}
-
 struct zap_details;
 void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
@@ -398,12 +393,8 @@ extern long populate_vma_page_range(struct vm_area_struct *vma,
 extern long faultin_vma_page_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end,
 				   bool write, int *locked);
-extern void munlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end);
-static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
-{
-	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
-}
+extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
+			      unsigned long len);
 
 /*
  * must be called with vma's mmap_lock held for read or write, and page locked.
@@ -411,9 +402,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 extern void mlock_vma_page(struct page *page);
 extern void munlock_vma_page(struct page *page);
 
-extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
-			      unsigned long len);
-
 /*
  * Clear the page's PageMlocked().  This can be useful in a situation where
  * we want to unconditionally remove a page from the pagecache -- e.g.,
diff --git a/mm/madvise.c b/mm/madvise.c
index 5604064df464..ae35d72627ef 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -530,6 +530,11 @@ static void madvise_cold_page_range(struct mmu_gather *tlb,
 	tlb_end_vma(tlb, vma);
 }
 
+static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
+{
+	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
+}
+
 static long madvise_cold(struct vm_area_struct *vma,
 			struct vm_area_struct **prev,
 			unsigned long start_addr, unsigned long end_addr)
diff --git a/mm/mlock.c b/mm/mlock.c
index 544c18ce2c58..d148da934fe9 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -137,8 +137,8 @@ void munlock_vma_page(struct page *page)
  * Returns with VM_LOCKED cleared.  Callers must be prepared to
  * deal with this.
  */
-void munlock_vma_pages_range(struct vm_area_struct *vma,
-			     unsigned long start, unsigned long end)
+static void munlock_vma_pages_range(struct vm_area_struct *vma,
+				    unsigned long start, unsigned long end)
 {
 	/* Reimplementation to follow in later commit */
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index 1e8fdb0b51ed..64b5985b5295 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2674,6 +2674,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 	vma->vm_prev = NULL;
 	do {
 		vma_rb_erase(vma, &mm->mm_rb);
+		if (vma->vm_flags & VM_LOCKED)
+			mm->locked_vm -= vma_pages(vma);
 		mm->map_count--;
 		tail_vma = vma;
 		vma = vma->vm_next;
@@ -2778,22 +2780,6 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	return __split_vma(mm, vma, addr, new_below);
 }
 
-static inline void
-unlock_range(struct vm_area_struct *start, unsigned long limit)
-{
-	struct mm_struct *mm = start->vm_mm;
-	struct vm_area_struct *tmp = start;
-
-	while (tmp && tmp->vm_start < limit) {
-		if (tmp->vm_flags & VM_LOCKED) {
-			mm->locked_vm -= vma_pages(tmp);
-			munlock_vma_pages_all(tmp);
-		}
-
-		tmp = tmp->vm_next;
-	}
-}
-
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work.  This now handles partial unmappings.
@@ -2874,12 +2860,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 			return error;
 	}
 
-	/*
-	 * unlock any mlock()ed ranges before detaching vmas
-	 */
-	if (mm->locked_vm)
-		unlock_range(vma, end);
-
 	/* Detach vmas from rbtree */
 	if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
 		downgrade = false;
@@ -3147,20 +3127,12 @@ void exit_mmap(struct mm_struct *mm)
 		 * Nothing can be holding mm->mmap_lock here and the above call
 		 * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
 		 * __oom_reap_task_mm() will not block.
-		 *
-		 * This needs to be done before calling unlock_range(),
-		 * which clears VM_LOCKED, otherwise the oom reaper cannot
-		 * reliably test it.
 		 */
 		(void)__oom_reap_task_mm(mm);
-
 		set_bit(MMF_OOM_SKIP, &mm->flags);
 	}
 
 	mmap_write_lock(mm);
-	if (mm->locked_vm)
-		unlock_range(mm->mmap, ULONG_MAX);
-
 	arch_exit_mmap(mm);
 
 	vma = mm->mmap;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 832fb330376e..6b875acabd1e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -526,7 +526,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
 	set_bit(MMF_UNSTABLE, &mm->flags);
 
 	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
-		if (!can_madv_lru_vma(vma))
+		if (vma->vm_flags & (VM_HUGETLB|VM_PFNMAP))
 			continue;
 
 		/*
-- 
2.34.1

