From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Andrea Arcangeli <aarcange@redhat.com>,
	Hugh Dickins <hughd@google.com>, Rik van Riel <riel@redhat.com>,
	Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCH 12/12] mm: convert remove_migration_pte() to page_check_walk()
Date: Tue, 24 Jan 2017 19:28:24 +0300
Message-ID: <20170124162824.91275-13-kirill.shutemov@linux.intel.com>
In-Reply-To: <20170124162824.91275-1-kirill.shutemov@linux.intel.com>

remove_migration_pte() can also easily be converted to page_check_walk().
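
The function open-codes its own page table walk, with a hugetlb special
case and a goto-based unlock path; all of that collapses into the
iterator, which locates each mapping of the page and takes the matching
page table lock itself (on return from page_check_walk(), pcw.pte points
at the entry and pcw.ptl is held). Looping until the iterator is
exhausted also lets a single rmap_walk() callback restore every
migration entry for the page within the VMA, which is what the
PTE-mapped THP fixes earlier in this series rely on.

For context, remove_migration_pte() is invoked as the ->rmap_one
callback from remove_migration_ptes(), with the old page passed through
->arg. A minimal sketch of the (unchanged) caller side, as it looks in
mm/migrate.c of this vintage:

	void remove_migration_ptes(struct page *old, struct page *new, bool locked)
	{
		struct rmap_walk_control rwc = {
			.rmap_one = remove_migration_pte,
			.arg = old,	/* ends up as pcw.page in the new code below */
		};

		if (locked)
			rmap_walk_locked(new, &rwc);
		else
			rmap_walk(new, &rwc);
	}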

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 mm/migrate.c | 103 ++++++++++++++++++++++++-----------------------------------
 1 file changed, 41 insertions(+), 62 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 87f4d0f81819..11c9373242e7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -193,82 +193,61 @@ void putback_movable_pages(struct list_head *l)
 /*
  * Restore a potential migration pte to a working pte entry
  */
-static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 				 unsigned long addr, void *old)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	struct page_check_walk pcw = {
+		.page = old,
+		.vma = vma,
+		.address = addr,
+		.flags = PAGE_CHECK_WALK_SYNC | PAGE_CHECK_WALK_MIGRATION,
+	};
+	struct page *new;
+	pte_t pte;
 	swp_entry_t entry;
- 	pmd_t *pmd;
-	pte_t *ptep, pte;
- 	spinlock_t *ptl;
 
-	if (unlikely(PageHuge(new))) {
-		ptep = huge_pte_offset(mm, addr);
-		if (!ptep)
-			goto out;
-		ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
-	} else {
-		pmd = mm_find_pmd(mm, addr);
-		if (!pmd)
-			goto out;
+	VM_BUG_ON_PAGE(PageTail(page), page);
+	while (page_check_walk(&pcw)) {
+		new = page - pcw.page->index +
+			linear_page_index(vma, pcw.address);
 
-		ptep = pte_offset_map(pmd, addr);
+		get_page(new);
+		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
+		if (pte_swp_soft_dirty(*pcw.pte))
+			pte = pte_mksoft_dirty(pte);
 
-		/*
-		 * Peek to check is_swap_pte() before taking ptlock?  No, we
-		 * can race mremap's move_ptes(), which skips anon_vma lock.
-		 */
-
-		ptl = pte_lockptr(mm, pmd);
-	}
-
- 	spin_lock(ptl);
-	pte = *ptep;
-	if (!is_swap_pte(pte))
-		goto unlock;
-
-	entry = pte_to_swp_entry(pte);
-
-	if (!is_migration_entry(entry) ||
-	    migration_entry_to_page(entry) != old)
-		goto unlock;
-
-	get_page(new);
-	pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
-	if (pte_swp_soft_dirty(*ptep))
-		pte = pte_mksoft_dirty(pte);
-
-	/* Recheck VMA as permissions can change since migration started  */
-	if (is_write_migration_entry(entry))
-		pte = maybe_mkwrite(pte, vma);
+		/* Recheck VMA as permissions can change since migration started  */
+		entry = pte_to_swp_entry(*pcw.pte);
+		if (is_write_migration_entry(entry))
+			pte = maybe_mkwrite(pte, vma);
 
 #ifdef CONFIG_HUGETLB_PAGE
-	if (PageHuge(new)) {
-		pte = pte_mkhuge(pte);
-		pte = arch_make_huge_pte(pte, vma, new, 0);
-	}
+		if (PageHuge(new)) {
+			pte = pte_mkhuge(pte);
+			pte = arch_make_huge_pte(pte, vma, new, 0);
+		}
 #endif
-	flush_dcache_page(new);
-	set_pte_at(mm, addr, ptep, pte);
+		flush_dcache_page(new);
+		set_pte_at(mm, pcw.address, pcw.pte, pte);
 
-	if (PageHuge(new)) {
-		if (PageAnon(new))
-			hugepage_add_anon_rmap(new, vma, addr);
+		if (PageHuge(new)) {
+			if (PageAnon(new))
+				hugepage_add_anon_rmap(new, vma, pcw.address);
+			else
+				page_dup_rmap(new, true);
+		} else if (PageAnon(new))
+			page_add_anon_rmap(new, vma, pcw.address, false);
 		else
-			page_dup_rmap(new, true);
-	} else if (PageAnon(new))
-		page_add_anon_rmap(new, vma, addr, false);
-	else
-		page_add_file_rmap(new, false);
+			page_add_file_rmap(new, false);
 
-	if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
-		mlock_vma_page(new);
+		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
+			mlock_vma_page(new);
+
+		/* No need to invalidate - it was non-present before */
+		update_mmu_cache(vma, pcw.address, pcw.pte);
+	}
 
-	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, addr, ptep);
-unlock:
-	pte_unmap_unlock(ptep, ptl);
-out:
 	return SWAP_AGAIN;
 }
 
-- 
2.11.0
