From: Hugh Dickins <hughd@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>,
	Mike Rapoport <rppt@kernel.org>,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
	Matthew Wilcox <willy@infradead.org>,
	David Hildenbrand <david@redhat.com>,
	Suren Baghdasaryan <surenb@google.com>,
	Qi Zheng <zhengqi.arch@bytedance.com>,
	Yang Shi <shy828301@gmail.com>,
	Mel Gorman <mgorman@techsingularity.net>,
	Peter Xu <peterx@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Will Deacon <will@kernel.org>, Yu Zhao <yuzhao@google.com>,
	Alistair Popple <apopple@nvidia.com>,
	Ralph Campbell <rcampbell@nvidia.com>,
	Ira Weiny <ira.weiny@intel.com>,
	Steven Price <steven.price@arm.com>,
	SeongJae Park <sj@kernel.org>,
	Naoya Horiguchi <naoya.horiguchi@nec.com>,
	Christophe Leroy <christophe.leroy@csgroup.eu>,
	Zack Rusin <zackr@vmware.com>, Jason Gunthorpe <jgg@ziepe.ca>,
	Axel Rasmussen <axelrasmussen@google.com>,
	Anshuman Khandual <anshuman.khandual@arm.com>,
	Pasha Tatashin <pasha.tatashin@soleen.com>,
	Miaohe Lin <linmiaohe@huawei.com>,
	Minchan Kim <minchan@kernel.org>,
	Christoph Hellwig <hch@infradead.org>, Song Liu <song@kernel.org>,
	Thomas Hellstrom <thomas.hellstrom@linux.intel.com>,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH 18/31] mm/mprotect: delete pmd_none_or_clear_bad_unless_trans_huge()
Date: Sun, 21 May 2023 22:12:08 -0700 (PDT)
Message-ID: <4a834932-9064-9ed7-3cd1-99466f549486@google.com>
In-Reply-To: <68a97fbe-5c1e-7ac6-72c-7b9c6290b370@google.com>

change_pmd_range() had a special pmd_none_or_clear_bad_unless_trans_huge(),
required to avoid "bad" choices when setting automatic NUMA hinting under
mmap_read_lock(); but most of that is already covered in pte_offset_map()
now.  change_pmd_range() just wants a pmd_none() check before wasting
time on MMU notifiers, then checks on the read-once _pmd value to work
out what's needed for huge cases.  Once change_pte_range() returns
-EAGAIN to retry when pte_offset_map_lock() fails, nothing more special
is needed.
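
For review convenience, a compressed sketch of the resulting control
flow (illustrative only: the *_sketch names are invented, and the MMU
notifier setup, prot_numa handling and huge-pmd branches are elided;
the real code is in the hunks below):

static long change_pte_range_sketch(struct vm_area_struct *vma, pmd_t *pmd,
				    unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *pte;
	long pages = 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte)
		return -EAGAIN;		/* pmd changed under us: caller retries */

	/* ... adjust protections on each pte, counting pages ... */

	pte_unmap_unlock(pte, ptl);
	return pages;
}

static long change_pmd_range_sketch(struct vm_area_struct *vma, pmd_t *pmd,
				    unsigned long addr, unsigned long end)
{
	unsigned long next;
	long pages = 0;

	do {
		long ret;
		pmd_t _pmd;
again:
		next = pmd_addr_end(addr, end);

		if (pmd_none(*pmd))	/* empty: skip MMU notifier work */
			goto next;

		/* ... mmu_notifier_invalidate_range_start() ... */

		_pmd = pmdp_get_lockless(pmd);	/* read once for huge checks */
		if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
			/* ... split or change the huge pmd ... */
		}

		ret = change_pte_range_sketch(vma, pmd, addr, next);
		if (ret < 0)
			goto again;	/* -EAGAIN: re-read *pmd and retry */
		pages += ret;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	return pages;
}

Returning a negative value rather than 0 keeps "nothing to do here"
(pmd_none, skip to the next pmd) distinct from "the page table vanished
while we were looking" (retry the same pmd).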

Signed-off-by: Hugh Dickins <hughd@google.com>
---
 mm/mprotect.c | 74 ++++++++++++---------------------------------------
 1 file changed, 17 insertions(+), 57 deletions(-)

diff --git a/mm/mprotect.c b/mm/mprotect.c
index c5a13c0f1017..64e1df0af514 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -93,22 +93,9 @@ static long change_pte_range(struct mmu_gather *tlb,
 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
 
 	tlb_change_page_size(tlb, PAGE_SIZE);
-
-	/*
-	 * Can be called with only the mmap_lock for reading by
-	 * prot_numa so we must check the pmd isn't constantly
-	 * changing from under us from pmd_none to pmd_trans_huge
-	 * and/or the other way around.
-	 */
-	if (pmd_trans_unstable(pmd))
-		return 0;
-
-	/*
-	 * The pmd points to a regular pte so the pmd can't change
-	 * from under us even if the mmap_lock is only hold for
-	 * reading.
-	 */
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	if (!pte)
+		return -EAGAIN;
 
 	/* Get target node for single threaded private VMAs */
 	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
@@ -301,26 +288,6 @@ static long change_pte_range(struct mmu_gather *tlb,
 	return pages;
 }
 
-/*
- * Used when setting automatic NUMA hinting protection where it is
- * critical that a numa hinting PMD is not confused with a bad PMD.
- */
-static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
-{
-	pmd_t pmdval = pmdp_get_lockless(pmd);
-
-	if (pmd_none(pmdval))
-		return 1;
-	if (pmd_trans_huge(pmdval))
-		return 0;
-	if (unlikely(pmd_bad(pmdval))) {
-		pmd_clear_bad(pmd);
-		return 1;
-	}
-
-	return 0;
-}
-
 /*
  * Return true if we want to split THPs into PTE mappings in change
  * protection procedure, false otherwise.
@@ -398,7 +365,8 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 	pmd = pmd_offset(pud, addr);
 	do {
 		long ret;
-
+		pmd_t _pmd;
+again:
 		next = pmd_addr_end(addr, end);
 
 		ret = change_pmd_prepare(vma, pmd, cp_flags);
@@ -406,16 +374,8 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 			pages = ret;
 			break;
 		}
-		/*
-		 * Automatic NUMA balancing walks the tables with mmap_lock
-		 * held for read. It's possible a parallel update to occur
-		 * between pmd_trans_huge() and a pmd_none_or_clear_bad()
-		 * check leading to a false positive and clearing.
-		 * Hence, it's necessary to atomically read the PMD value
-		 * for all the checks.
-		 */
-		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
-		     pmd_none_or_clear_bad_unless_trans_huge(pmd))
+
+		if (pmd_none(*pmd))
 			goto next;
 
 		/* invoke the mmu notifier if the pmd is populated */
@@ -426,7 +386,8 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 			mmu_notifier_invalidate_range_start(&range);
 		}
 
-		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
+		_pmd = pmdp_get_lockless(pmd);
+		if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
 			if ((next - addr != HPAGE_PMD_SIZE) ||
 			    pgtable_split_needed(vma, cp_flags)) {
 				__split_huge_pmd(vma, pmd, addr, false, NULL);
@@ -441,15 +402,10 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 					break;
 				}
 			} else {
-				/*
-				 * change_huge_pmd() does not defer TLB flushes,
-				 * so no need to propagate the tlb argument.
-				 */
-				int nr_ptes = change_huge_pmd(tlb, vma, pmd,
+				ret = change_huge_pmd(tlb, vma, pmd,
 						addr, newprot, cp_flags);
-
-				if (nr_ptes) {
-					if (nr_ptes == HPAGE_PMD_NR) {
+				if (ret) {
+					if (ret == HPAGE_PMD_NR) {
 						pages += HPAGE_PMD_NR;
 						nr_huge_updates++;
 					}
@@ -460,8 +416,12 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 			}
 			/* fall through, the trans huge pmd just split */
 		}
-		pages += change_pte_range(tlb, vma, pmd, addr, next,
-					  newprot, cp_flags);
+
+		ret = change_pte_range(tlb, vma, pmd, addr, next, newprot,
+				       cp_flags);
+		if (ret < 0)
+			goto again;
+		pages += ret;
 next:
 		cond_resched();
 	} while (pmd++, addr = next, addr != end);
-- 
2.35.3


