From: Andrew Morton <akpm@linux-foundation.org>
To: akpm@linux-foundation.org, apopple@nvidia.com, hughd@google.com,
	kirill.shutemov@linux.intel.com, linux-mm@kvack.org,
	mm-commits@vger.kernel.org, peterx@redhat.com,
	rcampbell@nvidia.com, shy828301@gmail.com,
	stable@vger.kernel.org, torvalds@linux-foundation.org,
	wangyugui@e16-tech.com, will@kernel.org, willy@infradead.org,
	ziy@nvidia.com
Subject: [patch 06/24] mm: page_vma_mapped_walk(): add a level of indentation
Date: Thu, 24 Jun 2021 18:39:17 -0700
Message-ID: <20210625013917.v1e2AXMQO%akpm@linux-foundation.org>
In-Reply-To: <20210624183838.ac3161ca4a43989665ac8b2f@linux-foundation.org>

From: Hugh Dickins <hughd@google.com>
Subject: mm: page_vma_mapped_walk(): add a level of indentation

page_vma_mapped_walk() cleanup: add a level of indentation to much of the
body, making no functional change in this commit, but reducing the later
diff when this is all converted to a loop.

[hughd@google.com: page_vma_mapped_walk(): add a level of indentation fix]
  Link: https://lkml.kernel.org/r/7f817555-3ce1-c785-e438-87d8efdcaf26@google.com
Link: https://lkml.kernel.org/r/efde211-f3e2-fe54-977-ef481419e7f3@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Wang Yugui <wangyugui@e16-tech.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 mm/page_vma_mapped.c |  105 +++++++++++++++++++++--------------------
 1 file changed, 55 insertions(+), 50 deletions(-)
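
For illustration only (not part of the patch; the helpers below are made up):
the "reducing the later diff" point in the changelog is the usual two-step
refactor of first adding braces and an indentation level with no functional
change, then reusing those same braces as the body of a loop, so the second
diff only touches the loop-control lines. A minimal compilable toy in plain C:

	#include <stdio.h>

	/* Step 1: add a level of indentation; behaviour is unchanged. */
	static void step1(int n)
	{
		{
			printf("step1: %d\n", n);
		}
	}

	/*
	 * Step 2 (a later change): the same braces become the loop body,
	 * so that diff only adds the do/while lines around them.
	 */
	static void step2(int n)
	{
		do {
			printf("step2: %d\n", n);
		} while (--n > 0);
	}

	int main(void)
	{
		step1(3);	/* prints once */
		step2(3);	/* prints three times */
		return 0;
	}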

--- a/mm/page_vma_mapped.c~mm-page_vma_mapped_walk-add-a-level-of-indentation
+++ a/mm/page_vma_mapped.c
@@ -173,62 +173,67 @@ bool page_vma_mapped_walk(struct page_vm
 	if (pvmw->pte)
 		goto next_pte;
 restart:
-	pgd = pgd_offset(mm, pvmw->address);
-	if (!pgd_present(*pgd))
-		return false;
-	p4d = p4d_offset(pgd, pvmw->address);
-	if (!p4d_present(*p4d))
-		return false;
-	pud = pud_offset(p4d, pvmw->address);
-	if (!pud_present(*pud))
-		return false;
-	pvmw->pmd = pmd_offset(pud, pvmw->address);
-	/*
-	 * Make sure the pmd value isn't cached in a register by the
-	 * compiler and used as a stale value after we've observed a
-	 * subsequent update.
-	 */
-	pmde = READ_ONCE(*pvmw->pmd);
-	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
-		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
-		pmde = *pvmw->pmd;
-		if (likely(pmd_trans_huge(pmde))) {
-			if (pvmw->flags & PVMW_MIGRATION)
-				return not_found(pvmw);
-			if (pmd_page(pmde) != page)
-				return not_found(pvmw);
-			return true;
-		}
-		if (!pmd_present(pmde)) {
-			swp_entry_t entry;
+	{
+		pgd = pgd_offset(mm, pvmw->address);
+		if (!pgd_present(*pgd))
+			return false;
+		p4d = p4d_offset(pgd, pvmw->address);
+		if (!p4d_present(*p4d))
+			return false;
+		pud = pud_offset(p4d, pvmw->address);
+		if (!pud_present(*pud))
+			return false;
 
-			if (!thp_migration_supported() ||
-			    !(pvmw->flags & PVMW_MIGRATION))
-				return not_found(pvmw);
-			entry = pmd_to_swp_entry(pmde);
-			if (!is_migration_entry(entry) ||
-			    migration_entry_to_page(entry) != page)
-				return not_found(pvmw);
-			return true;
-		}
-		/* THP pmd was split under us: handle on pte level */
-		spin_unlock(pvmw->ptl);
-		pvmw->ptl = NULL;
-	} else if (!pmd_present(pmde)) {
+		pvmw->pmd = pmd_offset(pud, pvmw->address);
 		/*
-		 * If PVMW_SYNC, take and drop THP pmd lock so that we
-		 * cannot return prematurely, while zap_huge_pmd() has
-		 * cleared *pmd but not decremented compound_mapcount().
+		 * Make sure the pmd value isn't cached in a register by the
+		 * compiler and used as a stale value after we've observed a
+		 * subsequent update.
 		 */
-		if ((pvmw->flags & PVMW_SYNC) && PageTransCompound(page)) {
-			spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+		pmde = READ_ONCE(*pvmw->pmd);
+
+		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
+			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+			pmde = *pvmw->pmd;
+			if (likely(pmd_trans_huge(pmde))) {
+				if (pvmw->flags & PVMW_MIGRATION)
+					return not_found(pvmw);
+				if (pmd_page(pmde) != page)
+					return not_found(pvmw);
+				return true;
+			}
+			if (!pmd_present(pmde)) {
+				swp_entry_t entry;
 
-			spin_unlock(ptl);
+				if (!thp_migration_supported() ||
+				    !(pvmw->flags & PVMW_MIGRATION))
+					return not_found(pvmw);
+				entry = pmd_to_swp_entry(pmde);
+				if (!is_migration_entry(entry) ||
+				    migration_entry_to_page(entry) != page)
+					return not_found(pvmw);
+				return true;
+			}
+			/* THP pmd was split under us: handle on pte level */
+			spin_unlock(pvmw->ptl);
+			pvmw->ptl = NULL;
+		} else if (!pmd_present(pmde)) {
+			/*
+			 * If PVMW_SYNC, take and drop THP pmd lock so that we
+			 * cannot return prematurely, while zap_huge_pmd() has
+			 * cleared *pmd but not decremented compound_mapcount().
+			 */
+			if ((pvmw->flags & PVMW_SYNC) &&
+			    PageTransCompound(page)) {
+				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+
+				spin_unlock(ptl);
+			}
+			return false;
 		}
-		return false;
+		if (!map_pte(pvmw))
+			goto next_pte;
 	}
-	if (!map_pte(pvmw))
-		goto next_pte;
 	while (1) {
 		unsigned long end;
 
_
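
(An aside on the READ_ONCE() comment carried over in the hunk above, as a
hedged userspace sketch rather than the kernel's actual implementation: the
point is to force a single load of *pmd, so every later test sees that one
snapshot instead of a value the compiler may have re-read after a concurrent
update.)

	#include <stdint.h>

	/*
	 * Simplified stand-in for READ_ONCE(): one load through a
	 * volatile-qualified pointer, so the snapshot cannot be silently
	 * re-fetched (and so change) between the checks below.
	 */
	#define READ_ONCE_SKETCH(x)	(*(const volatile __typeof__(x) *)&(x))

	int classify(uint64_t *slot)
	{
		uint64_t val = READ_ONCE_SKETCH(*slot);	/* one snapshot */

		if (val == 0)		/* all tests use the same val ... */
			return 0;
		if (val & 1)		/* ... never a freshly re-read *slot */
			return 1;
		return 2;
	}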

Thread overview: 25+ messages
2021-06-25  1:38 incoming Andrew Morton
2021-06-25  1:39 ` [patch 01/24] mm: page_vma_mapped_walk(): use page for pvmw->page Andrew Morton
2021-06-25  1:39 ` [patch 02/24] mm: page_vma_mapped_walk(): settle PageHuge on entry Andrew Morton
2021-06-25  1:39 ` [patch 03/24] mm: page_vma_mapped_walk(): use pmde for *pvmw->pmd Andrew Morton
2021-06-25  1:39 ` [patch 04/24] mm: page_vma_mapped_walk(): prettify PVMW_MIGRATION block Andrew Morton
2021-06-25  1:39 ` [patch 05/24] mm: page_vma_mapped_walk(): crossing page table boundary Andrew Morton
2021-06-25  1:39 ` Andrew Morton [this message]
2021-06-25  1:39 ` [patch 07/24] mm: page_vma_mapped_walk(): use goto instead of while (1) Andrew Morton
2021-06-25  1:39 ` [patch 08/24] mm: page_vma_mapped_walk(): get vma_address_end() earlier Andrew Morton
2021-06-25  1:39 ` [patch 09/24] mm/thp: fix page_vma_mapped_walk() if THP mapped by ptes Andrew Morton
2021-06-25  1:39 ` [patch 10/24] mm/thp: another PVMW_SYNC fix in page_vma_mapped_walk() Andrew Morton
2021-06-25  1:39 ` [patch 11/24] nilfs2: fix memory leak in nilfs_sysfs_delete_device_group Andrew Morton
2021-06-25  1:39 ` [patch 12/24] mm/vmalloc: add vmalloc_no_huge Andrew Morton
2021-06-25  1:39 ` [patch 13/24] KVM: s390: prepare for hugepage vmalloc Andrew Morton
2021-06-25  1:39 ` [patch 14/24] mm/vmalloc: unbreak kasan vmalloc support Andrew Morton
2021-06-25  1:39 ` [patch 15/24] kthread_worker: split code for canceling the delayed work timer Andrew Morton
2021-06-25  1:39 ` [patch 16/24] kthread: prevent deadlock when kthread_mod_delayed_work() races with kthread_cancel_delayed_work_sync() Andrew Morton
2021-06-25  1:39 ` [patch 17/24] mm, futex: fix shared futex pgoff on shmem huge page Andrew Morton
2021-06-25  1:39 ` [patch 18/24] mm/memory-failure: use a mutex to avoid memory_failure() races Andrew Morton
2021-06-25  1:39 ` [patch 19/24] mm,hwpoison: return -EHWPOISON to denote that the page has already been poisoned Andrew Morton
2021-06-25  1:40 ` [patch 20/24] mm/hwpoison: do not lock page again when me_huge_page() successfully recovers Andrew Morton
2021-06-25  1:40 ` [patch 21/24] mm/page_alloc: __alloc_pages_bulk(): do bounds check before accessing array Andrew Morton
2021-06-25  1:40 ` [patch 22/24] mm/page_alloc: do bulk array bounds check after checking populated elements Andrew Morton
2021-06-25  1:40 ` [patch 23/24] MAINTAINERS: fix Marek's identity again Andrew Morton
2021-06-25  1:40 ` [patch 24/24] mailmap: add Marek's other e-mail address and identity without diacritics Andrew Morton
