* [PATCH 0/9] Finish two folio conversions
@ 2023-12-11 16:22 Matthew Wilcox (Oracle)
  2023-12-11 16:22 ` [PATCH 1/9] mm: Convert ksm_might_need_to_copy() to work on folios Matthew Wilcox (Oracle)
                   ` (8 more replies)
  0 siblings, 9 replies; 23+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-11 16:22 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Most callers of page_add_new_anon_rmap() and
lru_cache_add_inactive_or_unevictable() have been converted to their
folio equivalents, but there are still a few stragglers.  There's a
bit of preparatory work in ksm and unuse_pte(), but after that it's
pretty mechanical.
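
Roughly, the mechanical part of each conversion looks like this (a
sketch rather than a hunk from any one patch; call sites differ in how
they already hold the folio):

	struct folio *folio = page_folio(page);

	/* before: page-based wrappers (removed in patch 9) */
	page_add_new_anon_rmap(page, vma, addr);
	lru_cache_add_inactive_or_unevictable(page, vma);

	/* after: the folio equivalents */
	folio_add_new_anon_rmap(folio, vma, addr);
	folio_add_lru_vma(folio, vma);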

Patch series against next-20231207.

Matthew Wilcox (Oracle) (9):
  mm: Convert ksm_might_need_to_copy() to work on folios
  mm: Simplify the assertions in unuse_pte()
  mm: Convert unuse_pte() to use a folio throughout
  mm: Remove some calls to page_add_new_anon_rmap()
  mm: Remove stale example from comment
  mm: Remove references to page_add_new_anon_rmap in comments
  mm: Convert migrate_vma_insert_page() to use a folio
  mm: Convert collapse_huge_page() to use a folio
  mm: Remove page_add_new_anon_rmap and
    lru_cache_add_inactive_or_unevictable

 include/linux/ksm.h     |  6 ++---
 include/linux/rmap.h    |  2 --
 include/linux/swap.h    |  3 ---
 kernel/events/uprobes.c |  2 +-
 mm/folio-compat.c       | 16 -------------
 mm/khugepaged.c         | 15 ++++++------
 mm/ksm.c                | 21 ++++++++--------
 mm/memory.c             | 10 ++++----
 mm/memremap.c           | 18 ++++----------
 mm/migrate_device.c     | 23 +++++++++---------
 mm/rmap.c               |  4 ++--
 mm/swapfile.c           | 53 ++++++++++++++++++++++-------------------
 mm/userfaultfd.c        |  2 +-
 13 files changed, 76 insertions(+), 99 deletions(-)

-- 
2.42.0




* [PATCH 1/9] mm: Convert ksm_might_need_to_copy() to work on folios
  2023-12-11 16:22 [PATCH 0/9] Finish two folio conversions Matthew Wilcox (Oracle)
@ 2023-12-11 16:22 ` Matthew Wilcox (Oracle)
  2023-12-12 12:32   ` David Hildenbrand
  2023-12-11 16:22 ` [PATCH 2/9] mm: Simplify the assertions in unuse_pte() Matthew Wilcox (Oracle)
                   ` (7 subsequent siblings)
  8 siblings, 1 reply; 23+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-11 16:22 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Accept a folio as an argument and return a folio result.  This removes
a call to compound_head() in do_swap_page() and prevents the folio and
page from getting out of sync in unuse_pte().
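
The caller-visible contract, as a minimal sketch (the real handling is
in the do_swap_page() and unuse_pte() hunks below; the error returns
and the order-0 assumption here are only illustrative):

	folio = ksm_might_need_to_copy(folio, vma, addr);
	if (!folio)				/* needed a copy, allocation failed */
		return -ENOMEM;
	if (folio == ERR_PTR(-EHWPOISON))	/* source page is hardware poisoned */
		return -EHWPOISON;
	/* otherwise: the original folio, or a fresh order-0 copy of it */
	page = folio_page(folio, 0);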

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/ksm.h |  6 +++---
 mm/ksm.c            | 21 +++++++++++----------
 mm/memory.c         |  8 ++++----
 mm/swapfile.c       |  8 +++++---
 4 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 4643d5244e77..401348e9f92b 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -76,7 +76,7 @@ static inline void ksm_exit(struct mm_struct *mm)
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_might_need_to_copy(struct page *page,
+struct folio *ksm_might_need_to_copy(struct folio *folio,
 			struct vm_area_struct *vma, unsigned long addr);
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
@@ -129,10 +129,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 	return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	return page;
+	return folio;
 }
 
 static inline void rmap_walk_ksm(struct folio *folio,
diff --git a/mm/ksm.c b/mm/ksm.c
index b93389a3780e..16532fa85a46 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2875,30 +2875,30 @@ void __ksm_exit(struct mm_struct *mm)
 	trace_ksm_exit(mm);
 }
 
-struct page *ksm_might_need_to_copy(struct page *page,
+struct folio *ksm_might_need_to_copy(struct folio *folio,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	struct folio *folio = page_folio(page);
+	struct page *page = folio_page(folio, 0);
 	struct anon_vma *anon_vma = folio_anon_vma(folio);
 	struct folio *new_folio;
 
 	if (folio_test_large(folio))
-		return page;
+		return folio;
 
 	if (folio_test_ksm(folio)) {
 		if (folio_stable_node(folio) &&
 		    !(ksm_run & KSM_RUN_UNMERGE))
-			return page;	/* no need to copy it */
+			return folio;	/* no need to copy it */
 	} else if (!anon_vma) {
-		return page;		/* no need to copy it */
+		return folio;		/* no need to copy it */
 	} else if (folio->index == linear_page_index(vma, addr) &&
 			anon_vma->root == vma->anon_vma->root) {
-		return page;		/* still no need to copy it */
+		return folio;		/* still no need to copy it */
 	}
 	if (PageHWPoison(page))
 		return ERR_PTR(-EHWPOISON);
 	if (!folio_test_uptodate(folio))
-		return page;		/* let do_swap_page report the error */
+		return folio;		/* let do_swap_page report the error */
 
 	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 	if (new_folio &&
@@ -2907,9 +2907,10 @@ struct page *ksm_might_need_to_copy(struct page *page,
 		new_folio = NULL;
 	}
 	if (new_folio) {
-		if (copy_mc_user_highpage(&new_folio->page, page, addr, vma)) {
+		if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
+								addr, vma)) {
 			folio_put(new_folio);
-			memory_failure_queue(page_to_pfn(page), 0);
+			memory_failure_queue(folio_pfn(folio), 0);
 			return ERR_PTR(-EHWPOISON);
 		}
 		folio_set_dirty(new_folio);
@@ -2920,7 +2921,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 #endif
 	}
 
-	return new_folio ? &new_folio->page : NULL;
+	return new_folio;
 }
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
diff --git a/mm/memory.c b/mm/memory.c
index 055647120f01..318f923134e4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3942,15 +3942,15 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		 * page->index of !PageKSM() pages would be nonlinear inside the
 		 * anon VMA -- PageKSM() is lost on actual swapout.
 		 */
-		page = ksm_might_need_to_copy(page, vma, vmf->address);
-		if (unlikely(!page)) {
+		folio = ksm_might_need_to_copy(folio, vma, vmf->address);
+		if (unlikely(!folio)) {
 			ret = VM_FAULT_OOM;
 			goto out_page;
-		} else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+		} else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
 			ret = VM_FAULT_HWPOISON;
 			goto out_page;
 		}
-		folio = page_folio(page);
+		page = folio_page(folio, 0);
 
 		/*
 		 * If we want to map a page that's in the swapcache writable, we
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 8be70912e298..0371b7b3cd27 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1749,11 +1749,13 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	int ret = 1;
 
 	swapcache = page;
-	page = ksm_might_need_to_copy(page, vma, addr);
-	if (unlikely(!page))
+	folio = ksm_might_need_to_copy(folio, vma, addr);
+	if (unlikely(!folio))
 		return -ENOMEM;
-	else if (unlikely(PTR_ERR(page) == -EHWPOISON))
+	else if (unlikely(folio == ERR_PTR(-EHWPOISON)))
 		hwpoisoned = true;
+	else
+		page = folio_file_page(folio, swp_offset(entry));
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
-- 
2.42.0




* [PATCH 2/9] mm: Simplify the assertions in unuse_pte()
  2023-12-11 16:22 [PATCH 0/9] Finish two folio conversions Matthew Wilcox (Oracle)
  2023-12-11 16:22 ` [PATCH 1/9] mm: Convert ksm_might_need_to_copy() to work on folios Matthew Wilcox (Oracle)
@ 2023-12-11 16:22 ` Matthew Wilcox (Oracle)
  2023-12-12 12:26   ` David Hildenbrand
  2023-12-11 16:22 ` [PATCH 3/9] mm: Convert unuse_pte() to use a folio throughout Matthew Wilcox (Oracle)
                   ` (6 subsequent siblings)
  8 siblings, 1 reply; 23+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-11 16:22 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

We should only see anon folios in this function (and there are many
assumptions of that already), so we can simplify these two assertions.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/swapfile.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0371b7b3cd27..88842c6fb8fe 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1789,8 +1789,8 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	arch_swap_restore(entry, page_folio(page));
 
 	/* See do_swap_page() */
-	BUG_ON(!PageAnon(page) && PageMappedToDisk(page));
-	BUG_ON(PageAnon(page) && PageAnonExclusive(page));
+	VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
+	VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
 
 	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-- 
2.42.0




* [PATCH 3/9] mm: Convert unuse_pte() to use a folio throughout
  2023-12-11 16:22 [PATCH 0/9] Finish two folio conversions Matthew Wilcox (Oracle)
  2023-12-11 16:22 ` [PATCH 1/9] mm: Convert ksm_might_need_to_copy() to work on folios Matthew Wilcox (Oracle)
  2023-12-11 16:22 ` [PATCH 2/9] mm: Simplify the assertions in unuse_pte() Matthew Wilcox (Oracle)
@ 2023-12-11 16:22 ` Matthew Wilcox (Oracle)
  2023-12-11 16:22 ` [PATCH 4/9] mm: Remove some calls to page_add_new_anon_rmap() Matthew Wilcox (Oracle)
                   ` (5 subsequent siblings)
  8 siblings, 0 replies; 23+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-11 16:22 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Saves about eight calls to compound_head().
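
The saving comes from the page-based helpers each re-deriving the folio
internally.  A representative example (simplified; the in-tree compat
wrappers look much like this):

	void unlock_page(struct page *page)
	{
		/* page_folio() boils down to compound_head() */
		folio_unlock(page_folio(page));
	}

unuse_pte() previously went through that pattern for get_page(),
PageUptodate(), PageWriteback(), unlock_page() and put_page(); deriving
the folio once at the top turns each of those into a direct folio
operation.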

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/swapfile.c | 47 +++++++++++++++++++++++++----------------------
 1 file changed, 25 insertions(+), 22 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 88842c6fb8fe..21eced2d1f80 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1741,21 +1741,25 @@ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, swp_entry_t entry, struct folio *folio)
 {
-	struct page *page = folio_file_page(folio, swp_offset(entry));
-	struct page *swapcache;
+	struct page *page;
+	struct folio *swapcache;
 	spinlock_t *ptl;
 	pte_t *pte, new_pte, old_pte;
-	bool hwpoisoned = PageHWPoison(page);
+	bool hwpoisoned = false;
 	int ret = 1;
 
-	swapcache = page;
+	swapcache = folio;
 	folio = ksm_might_need_to_copy(folio, vma, addr);
 	if (unlikely(!folio))
 		return -ENOMEM;
-	else if (unlikely(folio == ERR_PTR(-EHWPOISON)))
+	else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
+		hwpoisoned = true;
+		folio = swapcache;
+	}
+
+	page = folio_file_page(folio, swp_offset(entry));
+	if (PageHWPoison(page))
 		hwpoisoned = true;
-	else
-		page = folio_file_page(folio, swp_offset(entry));
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
@@ -1766,13 +1770,12 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 
 	old_pte = ptep_get(pte);
 
-	if (unlikely(hwpoisoned || !PageUptodate(page))) {
+	if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
 		swp_entry_t swp_entry;
 
 		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 		if (hwpoisoned) {
-			swp_entry = make_hwpoison_entry(swapcache);
-			page = swapcache;
+			swp_entry = make_hwpoison_entry(page);
 		} else {
 			swp_entry = make_poisoned_swp_entry();
 		}
@@ -1786,7 +1789,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	 * when reading from swap. This metadata may be indexed by swap entry
 	 * so this must be called before swap_free().
 	 */
-	arch_swap_restore(entry, page_folio(page));
+	arch_swap_restore(entry, folio);
 
 	/* See do_swap_page() */
 	VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
@@ -1794,23 +1797,23 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 
 	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-	get_page(page);
-	if (page == swapcache) {
+	folio_get(folio);
+	if (folio == swapcache) {
 		rmap_t rmap_flags = RMAP_NONE;
 
 		/*
-		 * See do_swap_page(): PageWriteback() would be problematic.
-		 * However, we do a wait_on_page_writeback() just before this
-		 * call and have the page locked.
+		 * See do_swap_page(): writeback would be problematic.
+		 * However, we do a folio_wait_writeback() just before this
+		 * call and have the folio locked.
 		 */
-		VM_BUG_ON_PAGE(PageWriteback(page), page);
+		VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
 		if (pte_swp_exclusive(old_pte))
 			rmap_flags |= RMAP_EXCLUSIVE;
 
 		page_add_anon_rmap(page, vma, addr, rmap_flags);
 	} else { /* ksm created a completely new copy */
-		page_add_new_anon_rmap(page, vma, addr);
-		lru_cache_add_inactive_or_unevictable(page, vma);
+		folio_add_new_anon_rmap(folio, vma, addr);
+		folio_add_lru_vma(folio, vma);
 	}
 	new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
 	if (pte_swp_soft_dirty(old_pte))
@@ -1823,9 +1826,9 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 out:
 	if (pte)
 		pte_unmap_unlock(pte, ptl);
-	if (page != swapcache) {
-		unlock_page(page);
-		put_page(page);
+	if (folio != swapcache) {
+		folio_unlock(folio);
+		folio_put(folio);
 	}
 	return ret;
 }
-- 
2.42.0




* [PATCH 4/9] mm: Remove some calls to page_add_new_anon_rmap()
  2023-12-11 16:22 [PATCH 0/9] Finish two folio conversions Matthew Wilcox (Oracle)
                   ` (2 preceding siblings ...)
  2023-12-11 16:22 ` [PATCH 3/9] mm: Convert unuse_pte() to use a folio throughout Matthew Wilcox (Oracle)
@ 2023-12-11 16:22 ` Matthew Wilcox (Oracle)
  2023-12-12 13:20   ` David Hildenbrand
  2023-12-11 16:22 ` [PATCH 5/9] mm: Remove stale example from comment Matthew Wilcox (Oracle)
                   ` (4 subsequent siblings)
  8 siblings, 1 reply; 23+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-11 16:22 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

We already have the folio in these functions, we just need to use it.
folio_add_new_anon_rmap() didn't exist at the time they were converted
to folios.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 kernel/events/uprobes.c | 2 +-
 mm/memory.c             | 2 +-
 mm/userfaultfd.c        | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 435aac1d8c27..8b115fc43f04 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -181,7 +181,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
 	if (new_page) {
 		folio_get(new_folio);
-		page_add_new_anon_rmap(new_page, vma, addr);
+		folio_add_new_anon_rmap(new_folio, vma, addr);
 		folio_add_lru_vma(new_folio, vma);
 	} else
 		/* no new page, just dec_mm_counter for old_page */
diff --git a/mm/memory.c b/mm/memory.c
index 318f923134e4..c03a7729d5b4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4068,7 +4068,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 
 	/* ksm created a completely new copy */
 	if (unlikely(folio != swapcache && swapcache)) {
-		page_add_new_anon_rmap(page, vma, vmf->address);
+		folio_add_new_anon_rmap(folio, vma, vmf->address);
 		folio_add_lru_vma(folio, vma);
 	} else {
 		page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 71d0281f1162..2d8b03a009b4 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -116,7 +116,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
 			folio_add_lru(folio);
 		page_add_file_rmap(page, dst_vma, false);
 	} else {
-		page_add_new_anon_rmap(page, dst_vma, dst_addr);
+		folio_add_new_anon_rmap(folio, dst_vma, dst_addr);
 		folio_add_lru_vma(folio, dst_vma);
 	}
 
-- 
2.42.0




* [PATCH 5/9] mm: Remove stale example from comment
  2023-12-11 16:22 [PATCH 0/9] Finish two folio conversions Matthew Wilcox (Oracle)
                   ` (3 preceding siblings ...)
  2023-12-11 16:22 ` [PATCH 4/9] mm: Remove some calls to page_add_new_anon_rmap() Matthew Wilcox (Oracle)
@ 2023-12-11 16:22 ` Matthew Wilcox (Oracle)
  2023-12-12 13:20   ` David Hildenbrand
  2023-12-11 16:22 ` [PATCH 6/9] mm: Remove references to page_add_new_anon_rmap in comments Matthew Wilcox (Oracle)
                   ` (3 subsequent siblings)
  8 siblings, 1 reply; 23+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-11 16:22 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm, Ralph Campbell

folio_add_new_anon_rmap() no longer works this way, so just remove the
entire example.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
---
 mm/memremap.c | 18 ++++--------------
 1 file changed, 4 insertions(+), 14 deletions(-)

diff --git a/mm/memremap.c b/mm/memremap.c
index 9531faa92a7c..9e9fb1972fff 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -473,21 +473,11 @@ void free_zone_device_page(struct page *page)
 		__ClearPageAnonExclusive(page);
 
 	/*
-	 * When a device managed page is freed, the page->mapping field
+	 * When a device managed page is freed, the folio->mapping field
 	 * may still contain a (stale) mapping value. For example, the
-	 * lower bits of page->mapping may still identify the page as an
-	 * anonymous page. Ultimately, this entire field is just stale
-	 * and wrong, and it will cause errors if not cleared.  One
-	 * example is:
-	 *
-	 *  migrate_vma_pages()
-	 *    migrate_vma_insert_page()
-	 *      page_add_new_anon_rmap()
-	 *        __page_set_anon_rmap()
-	 *          ...checks page->mapping, via PageAnon(page) call,
-	 *            and incorrectly concludes that the page is an
-	 *            anonymous page. Therefore, it incorrectly,
-	 *            silently fails to set up the new anon rmap.
+	 * lower bits of folio->mapping may still identify the folio as an
+	 * anonymous folio. Ultimately, this entire field is just stale
+	 * and wrong, and it will cause errors if not cleared.
 	 *
 	 * For other types of ZONE_DEVICE pages, migration is either
 	 * handled differently or not done at all, so there is no need
-- 
2.42.0




* [PATCH 6/9] mm: Remove references to page_add_new_anon_rmap in comments
  2023-12-11 16:22 [PATCH 0/9] Finish two folio conversions Matthew Wilcox (Oracle)
                   ` (4 preceding siblings ...)
  2023-12-11 16:22 ` [PATCH 5/9] mm: Remove stale example from comment Matthew Wilcox (Oracle)
@ 2023-12-11 16:22 ` Matthew Wilcox (Oracle)
  2023-12-12 13:20   ` David Hildenbrand
  2023-12-11 16:22 ` [PATCH 7/9] mm: Convert migrate_vma_insert_page() to use a folio Matthew Wilcox (Oracle)
                   ` (2 subsequent siblings)
  8 siblings, 1 reply; 23+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-11 16:22 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Refer to folio_add_new_anon_rmap() instead.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/rmap.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index de9426ad0f1b..f3d49ec197ef 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1231,9 +1231,9 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
 	 * We have exclusion against page_add_anon_rmap because the caller
 	 * always holds the page locked.
 	 *
-	 * We have exclusion against page_add_new_anon_rmap because those pages
+	 * We have exclusion against folio_add_new_anon_rmap because those pages
 	 * are initially only visible via the pagetables, and the pte is locked
-	 * over the call to page_add_new_anon_rmap.
+	 * over the call to folio_add_new_anon_rmap.
 	 */
 	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
 			folio);
-- 
2.42.0




* [PATCH 7/9] mm: Convert migrate_vma_insert_page() to use a folio
  2023-12-11 16:22 [PATCH 0/9] Finish two folio conversions Matthew Wilcox (Oracle)
                   ` (5 preceding siblings ...)
  2023-12-11 16:22 ` [PATCH 6/9] mm: Remove references to page_add_new_anon_rmap in comments Matthew Wilcox (Oracle)
@ 2023-12-11 16:22 ` Matthew Wilcox (Oracle)
  2023-12-11 22:17   ` Alistair Popple
  2023-12-12 13:21   ` David Hildenbrand
  2023-12-11 16:22 ` [PATCH 8/9] mm: Convert collapse_huge_page() " Matthew Wilcox (Oracle)
  2023-12-11 16:22 ` [PATCH 9/9] mm: Remove page_add_new_anon_rmap and lru_cache_add_inactive_or_unevictable Matthew Wilcox (Oracle)
  8 siblings, 2 replies; 23+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-11 16:22 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Replaces five calls to compound_head() with one.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/migrate_device.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 8ac1f79f754a..81193363f8cd 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -564,6 +564,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 				    struct page *page,
 				    unsigned long *src)
 {
+	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma = migrate->vma;
 	struct mm_struct *mm = vma->vm_mm;
 	bool flush = false;
@@ -596,17 +597,17 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 		goto abort;
 	if (unlikely(anon_vma_prepare(vma)))
 		goto abort;
-	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
+	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
 		goto abort;
 
 	/*
-	 * The memory barrier inside __SetPageUptodate makes sure that
-	 * preceding stores to the page contents become visible before
+	 * The memory barrier inside __folio_mark_uptodate makes sure that
+	 * preceding stores to the folio contents become visible before
 	 * the set_pte_at() write.
 	 */
-	__SetPageUptodate(page);
+	__folio_mark_uptodate(folio);
 
-	if (is_device_private_page(page)) {
+	if (folio_is_device_private(folio)) {
 		swp_entry_t swp_entry;
 
 		if (vma->vm_flags & VM_WRITE)
@@ -617,8 +618,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 						page_to_pfn(page));
 		entry = swp_entry_to_pte(swp_entry);
 	} else {
-		if (is_zone_device_page(page) &&
-		    !is_device_coherent_page(page)) {
+		if (folio_is_zone_device(folio) &&
+		    !folio_is_device_coherent(folio)) {
 			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
 			goto abort;
 		}
@@ -652,10 +653,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 		goto unlock_abort;
 
 	inc_mm_counter(mm, MM_ANONPAGES);
-	page_add_new_anon_rmap(page, vma, addr);
-	if (!is_zone_device_page(page))
-		lru_cache_add_inactive_or_unevictable(page, vma);
-	get_page(page);
+	folio_add_new_anon_rmap(folio, vma, addr);
+	if (!folio_is_zone_device(folio))
+		folio_add_lru_vma(folio, vma);
+	folio_get(folio);
 
 	if (flush) {
 		flush_cache_page(vma, addr, pte_pfn(orig_pte));
-- 
2.42.0




* [PATCH 8/9] mm: Convert collapse_huge_page() to use a folio
  2023-12-11 16:22 [PATCH 0/9] Finish two folio conversions Matthew Wilcox (Oracle)
                   ` (6 preceding siblings ...)
  2023-12-11 16:22 ` [PATCH 7/9] mm: Convert migrate_vma_insert_page() to use a folio Matthew Wilcox (Oracle)
@ 2023-12-11 16:22 ` Matthew Wilcox (Oracle)
  2023-12-12 13:21   ` David Hildenbrand
  2023-12-11 16:22 ` [PATCH 9/9] mm: Remove page_add_new_anon_rmap and lru_cache_add_inactive_or_unevictable Matthew Wilcox (Oracle)
  8 siblings, 1 reply; 23+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-11 16:22 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Replace three calls to compound_head() with one.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/khugepaged.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e722f754797f..f7ec73976c38 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1087,6 +1087,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	pmd_t *pmd, _pmd;
 	pte_t *pte;
 	pgtable_t pgtable;
+	struct folio *folio;
 	struct page *hpage;
 	spinlock_t *pmd_ptl, *pte_ptl;
 	int result = SCAN_FAIL;
@@ -1209,13 +1210,13 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	if (unlikely(result != SCAN_SUCCEED))
 		goto out_up_write;
 
+	folio = page_folio(hpage);
 	/*
-	 * spin_lock() below is not the equivalent of smp_wmb(), but
-	 * the smp_wmb() inside __SetPageUptodate() can be reused to
-	 * avoid the copy_huge_page writes to become visible after
-	 * the set_pmd_at() write.
+	 * The smp_wmb() inside __folio_mark_uptodate() ensures the
+	 * copy_huge_page writes become visible before the set_pmd_at()
+	 * write.
 	 */
-	__SetPageUptodate(hpage);
+	__folio_mark_uptodate(folio);
 	pgtable = pmd_pgtable(_pmd);
 
 	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
@@ -1223,8 +1224,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 
 	spin_lock(pmd_ptl);
 	BUG_ON(!pmd_none(*pmd));
-	page_add_new_anon_rmap(hpage, vma, address);
-	lru_cache_add_inactive_or_unevictable(hpage, vma);
+	folio_add_new_anon_rmap(folio, vma, address);
+	folio_add_lru_vma(folio, vma);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache_pmd(vma, address, pmd);
-- 
2.42.0




* [PATCH 9/9] mm: Remove page_add_new_anon_rmap and lru_cache_add_inactive_or_unevictable
  2023-12-11 16:22 [PATCH 0/9] Finish two folio conversions Matthew Wilcox (Oracle)
                   ` (7 preceding siblings ...)
  2023-12-11 16:22 ` [PATCH 8/9] mm: Convert collapse_huge_page() " Matthew Wilcox (Oracle)
@ 2023-12-11 16:22 ` Matthew Wilcox (Oracle)
  2023-12-12 13:21   ` David Hildenbrand
  8 siblings, 1 reply; 23+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-12-11 16:22 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

All callers have now been converted to folio_add_new_anon_rmap() and
folio_add_lru_vma() so we can remove the wrapper.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/rmap.h |  2 --
 include/linux/swap.h |  3 ---
 mm/folio-compat.c    | 16 ----------------
 3 files changed, 21 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index af6a32b6f3e7..0ae2bb0e77f5 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -197,8 +197,6 @@ typedef int __bitwise rmap_t;
 void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address, rmap_t flags);
-void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
-		unsigned long address);
 void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
 		unsigned long address);
 void page_add_file_rmap(struct page *, struct vm_area_struct *,
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4f25b1237364..edc0f2c8ce01 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -397,9 +397,6 @@ void folio_deactivate(struct folio *folio);
 void folio_mark_lazyfree(struct folio *folio);
 extern void swap_setup(void);
 
-extern void lru_cache_add_inactive_or_unevictable(struct page *page,
-						struct vm_area_struct *vma);
-
 /* linux/mm/vmscan.c */
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index aee3b9a16828..50412014f16f 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -77,12 +77,6 @@ bool redirty_page_for_writepage(struct writeback_control *wbc,
 }
 EXPORT_SYMBOL(redirty_page_for_writepage);
 
-void lru_cache_add_inactive_or_unevictable(struct page *page,
-		struct vm_area_struct *vma)
-{
-	folio_add_lru_vma(page_folio(page), vma);
-}
-
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 		pgoff_t index, gfp_t gfp)
 {
@@ -122,13 +116,3 @@ void putback_lru_page(struct page *page)
 {
 	folio_putback_lru(page_folio(page));
 }
-
-#ifdef CONFIG_MMU
-void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
-		unsigned long address)
-{
-	VM_BUG_ON_PAGE(PageTail(page), page);
-
-	return folio_add_new_anon_rmap((struct folio *)page, vma, address);
-}
-#endif
-- 
2.42.0




* Re: [PATCH 7/9] mm: Convert migrate_vma_insert_page() to use a folio
  2023-12-11 16:22 ` [PATCH 7/9] mm: Convert migrate_vma_insert_page() to use a folio Matthew Wilcox (Oracle)
@ 2023-12-11 22:17   ` Alistair Popple
  2023-12-12 13:21   ` David Hildenbrand
  1 sibling, 0 replies; 23+ messages in thread
From: Alistair Popple @ 2023-12-11 22:17 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle); +Cc: Andrew Morton, linux-mm


Thanks. I've been working on converting most of the code in
migrate_device.c to use folios to add support for device-private THP
migration, and I had this exact change, so feel free to add:

Reviewed-by: Alistair Popple <apopple@nvidia.com>

"Matthew Wilcox (Oracle)" <willy@infradead.org> writes:

> Replaces five calls to compound_head() with one.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  mm/migrate_device.c | 23 ++++++++++++-----------
>  1 file changed, 12 insertions(+), 11 deletions(-)
>
> diff --git a/mm/migrate_device.c b/mm/migrate_device.c
> index 8ac1f79f754a..81193363f8cd 100644
> --- a/mm/migrate_device.c
> +++ b/mm/migrate_device.c
> @@ -564,6 +564,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
>  				    struct page *page,
>  				    unsigned long *src)
>  {
> +	struct folio *folio = page_folio(page);
>  	struct vm_area_struct *vma = migrate->vma;
>  	struct mm_struct *mm = vma->vm_mm;
>  	bool flush = false;
> @@ -596,17 +597,17 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
>  		goto abort;
>  	if (unlikely(anon_vma_prepare(vma)))
>  		goto abort;
> -	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
> +	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
>  		goto abort;
>  
>  	/*
> -	 * The memory barrier inside __SetPageUptodate makes sure that
> -	 * preceding stores to the page contents become visible before
> +	 * The memory barrier inside __folio_mark_uptodate makes sure that
> +	 * preceding stores to the folio contents become visible before
>  	 * the set_pte_at() write.
>  	 */
> -	__SetPageUptodate(page);
> +	__folio_mark_uptodate(folio);
>  
> -	if (is_device_private_page(page)) {
> +	if (folio_is_device_private(folio)) {
>  		swp_entry_t swp_entry;
>  
>  		if (vma->vm_flags & VM_WRITE)
> @@ -617,8 +618,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
>  						page_to_pfn(page));
>  		entry = swp_entry_to_pte(swp_entry);
>  	} else {
> -		if (is_zone_device_page(page) &&
> -		    !is_device_coherent_page(page)) {
> +		if (folio_is_zone_device(folio) &&
> +		    !folio_is_device_coherent(folio)) {
>  			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
>  			goto abort;
>  		}
> @@ -652,10 +653,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
>  		goto unlock_abort;
>  
>  	inc_mm_counter(mm, MM_ANONPAGES);
> -	page_add_new_anon_rmap(page, vma, addr);
> -	if (!is_zone_device_page(page))
> -		lru_cache_add_inactive_or_unevictable(page, vma);
> -	get_page(page);
> +	folio_add_new_anon_rmap(folio, vma, addr);
> +	if (!folio_is_zone_device(folio))
> +		folio_add_lru_vma(folio, vma);
> +	folio_get(folio);
>  
>  	if (flush) {
>  		flush_cache_page(vma, addr, pte_pfn(orig_pte));




* Re: [PATCH 2/9] mm: Simplify the assertions in unuse_pte()
  2023-12-11 16:22 ` [PATCH 2/9] mm: Simplify the assertions in unuse_pte() Matthew Wilcox (Oracle)
@ 2023-12-12 12:26   ` David Hildenbrand
  2023-12-12 13:52     ` Matthew Wilcox
  0 siblings, 1 reply; 23+ messages in thread
From: David Hildenbrand @ 2023-12-12 12:26 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle), Andrew Morton; +Cc: linux-mm

On 11.12.23 17:22, Matthew Wilcox (Oracle) wrote:
> We should only see anon folios in this function (and there are many
> assumptions of that already), so we can simplify these two assertions.

If we swapped in a fresh page, it is not PageAnon before we do the 
page_add_anon_rmap() call.

So I'm pretty sure this is wrong.

[I have plans to move the "turn into anon folio" step out of
page_add_anon_rmap(); it will require teaching page_add_new_anon_rmap()
about RMAP_EXCLUSIVE]

-- 
Cheers,

David / dhildenb




* Re: [PATCH 1/9] mm: Convert ksm_might_need_to_copy() to work on folios
  2023-12-11 16:22 ` [PATCH 1/9] mm: Convert ksm_might_need_to_copy() to work on folios Matthew Wilcox (Oracle)
@ 2023-12-12 12:32   ` David Hildenbrand
  2023-12-12 12:43     ` Matthew Wilcox
  0 siblings, 1 reply; 23+ messages in thread
From: David Hildenbrand @ 2023-12-12 12:32 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle), Andrew Morton; +Cc: linux-mm

> +++ b/mm/swapfile.c
> @@ -1749,11 +1749,13 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
>   	int ret = 1;
>   
>   	swapcache = page;
> -	page = ksm_might_need_to_copy(page, vma, addr);
> -	if (unlikely(!page))
> +	folio = ksm_might_need_to_copy(folio, vma, addr);
> +	if (unlikely(!folio))
>   		return -ENOMEM;
> -	else if (unlikely(PTR_ERR(page) == -EHWPOISON))
> +	else if (unlikely(folio == ERR_PTR(-EHWPOISON)))
>   		hwpoisoned = true;
> +	else
> +		page = folio_file_page(folio, swp_offset(entry));

Just to double-check:

assuming ksm_might_need_to_copy() in fact allocated a fresh (order-0) 
folio, folio_file_page() will simply translate to "folio_page(folio, 0)" 
and should be fine.

-- 
Cheers,

David / dhildenb




* Re: [PATCH 1/9] mm: Convert ksm_might_need_to_copy() to work on folios
  2023-12-12 12:32   ` David Hildenbrand
@ 2023-12-12 12:43     ` Matthew Wilcox
  2023-12-12 12:43       ` David Hildenbrand
  0 siblings, 1 reply; 23+ messages in thread
From: Matthew Wilcox @ 2023-12-12 12:43 UTC (permalink / raw)
  To: David Hildenbrand; +Cc: Andrew Morton, linux-mm

On Tue, Dec 12, 2023 at 01:32:50PM +0100, David Hildenbrand wrote:
> > +++ b/mm/swapfile.c
> > @@ -1749,11 +1749,13 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
> >   	int ret = 1;
> >   	swapcache = page;
> > -	page = ksm_might_need_to_copy(page, vma, addr);
> > -	if (unlikely(!page))
> > +	folio = ksm_might_need_to_copy(folio, vma, addr);
> > +	if (unlikely(!folio))
> >   		return -ENOMEM;
> > -	else if (unlikely(PTR_ERR(page) == -EHWPOISON))
> > +	else if (unlikely(folio == ERR_PTR(-EHWPOISON)))
> >   		hwpoisoned = true;
> > +	else
> > +		page = folio_file_page(folio, swp_offset(entry));
> 
> Just to double-check:
> 
> assuming ksm_might_need_to_copy() in fact allocated a fresh (order-0) folio,
> folio_file_page() will simply translate to "folio_page(folio, 0)" and should
> be fine.

That's right:

static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
        return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

so an order-0 folio will return the only page in the folio.



* Re: [PATCH 1/9] mm: Convert ksm_might_need_to_copy() to work on folios
  2023-12-12 12:43     ` Matthew Wilcox
@ 2023-12-12 12:43       ` David Hildenbrand
  0 siblings, 0 replies; 23+ messages in thread
From: David Hildenbrand @ 2023-12-12 12:43 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: Andrew Morton, linux-mm

On 12.12.23 13:43, Matthew Wilcox wrote:
> On Tue, Dec 12, 2023 at 01:32:50PM +0100, David Hildenbrand wrote:
>>> +++ b/mm/swapfile.c
>>> @@ -1749,11 +1749,13 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
>>>    	int ret = 1;
>>>    	swapcache = page;
>>> -	page = ksm_might_need_to_copy(page, vma, addr);
>>> -	if (unlikely(!page))
>>> +	folio = ksm_might_need_to_copy(folio, vma, addr);
>>> +	if (unlikely(!folio))
>>>    		return -ENOMEM;
>>> -	else if (unlikely(PTR_ERR(page) == -EHWPOISON))
>>> +	else if (unlikely(folio == ERR_PTR(-EHWPOISON)))
>>>    		hwpoisoned = true;
>>> +	else
>>> +		page = folio_file_page(folio, swp_offset(entry));
>>
>> Just to double-check:
>>
>> assuming ksm_might_need_to_copy() in fact allocated a fresh (order-0) folio,
>> folio_file_page() will simply translate to "folio_page(folio, 0)" and should
>> be fine.
> 
> That's right:
> 
> static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
> {
>          return folio_page(folio, index & (folio_nr_pages(folio) - 1));
> }
> 
> so an order-0 folio will return the only page in the folio.
> 

LGTM then

Reviewed-by: David Hildenbrand <david@redhat.com>

-- 
Cheers,

David / dhildenb




* Re: [PATCH 4/9] mm: Remove some calls to page_add_new_anon_rmap()
  2023-12-11 16:22 ` [PATCH 4/9] mm: Remove some calls to page_add_new_anon_rmap() Matthew Wilcox (Oracle)
@ 2023-12-12 13:20   ` David Hildenbrand
  0 siblings, 0 replies; 23+ messages in thread
From: David Hildenbrand @ 2023-12-12 13:20 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle), Andrew Morton; +Cc: linux-mm

On 11.12.23 17:22, Matthew Wilcox (Oracle) wrote:
> We already have the folio in these functions, we just need to use it.
> folio_add_new_anon_rmap() didn't exist at the time they were converted
> to folios.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   kernel/events/uprobes.c | 2 +-
>   mm/memory.c             | 2 +-
>   mm/userfaultfd.c        | 2 +-
>   3 files changed, 3 insertions(+), 3 deletions(-)
> 
> diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
> index 435aac1d8c27..8b115fc43f04 100644
> --- a/kernel/events/uprobes.c
> +++ b/kernel/events/uprobes.c
> @@ -181,7 +181,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
>   
>   	if (new_page) {
>   		folio_get(new_folio);
> -		page_add_new_anon_rmap(new_page, vma, addr);
> +		folio_add_new_anon_rmap(new_folio, vma, addr);
>   		folio_add_lru_vma(new_folio, vma);
>   	} else
>   		/* no new page, just dec_mm_counter for old_page */
> diff --git a/mm/memory.c b/mm/memory.c
> index 318f923134e4..c03a7729d5b4 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4068,7 +4068,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>   
>   	/* ksm created a completely new copy */
>   	if (unlikely(folio != swapcache && swapcache)) {
> -		page_add_new_anon_rmap(page, vma, vmf->address);
> +		folio_add_new_anon_rmap(folio, vma, vmf->address);
>   		folio_add_lru_vma(folio, vma);
>   	} else {
>   		page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
> diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> index 71d0281f1162..2d8b03a009b4 100644
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
> @@ -116,7 +116,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
>   			folio_add_lru(folio);
>   		page_add_file_rmap(page, dst_vma, false);
>   	} else {
> -		page_add_new_anon_rmap(page, dst_vma, dst_addr);
> +		folio_add_new_anon_rmap(folio, dst_vma, dst_addr);
>   		folio_add_lru_vma(folio, dst_vma);
>   	}
>   

Reviewed-by: David Hildenbrand <david@redhat.com>

-- 
Cheers,

David / dhildenb




* Re: [PATCH 5/9] mm: Remove stale example from comment
  2023-12-11 16:22 ` [PATCH 5/9] mm: Remove stale example from comment Matthew Wilcox (Oracle)
@ 2023-12-12 13:20   ` David Hildenbrand
  0 siblings, 0 replies; 23+ messages in thread
From: David Hildenbrand @ 2023-12-12 13:20 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle), Andrew Morton; +Cc: linux-mm, Ralph Campbell

On 11.12.23 17:22, Matthew Wilcox (Oracle) wrote:
> folio_add_new_anon_rmap() no longer works this way, so just remove the
> entire example.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Cc: Ralph Campbell <rcampbell@nvidia.com>
> ---
>   mm/memremap.c | 18 ++++--------------
>   1 file changed, 4 insertions(+), 14 deletions(-)
> 
> diff --git a/mm/memremap.c b/mm/memremap.c
> index 9531faa92a7c..9e9fb1972fff 100644
> --- a/mm/memremap.c
> +++ b/mm/memremap.c
> @@ -473,21 +473,11 @@ void free_zone_device_page(struct page *page)
>   		__ClearPageAnonExclusive(page);
>   
>   	/*
> -	 * When a device managed page is freed, the page->mapping field
> +	 * When a device managed page is freed, the folio->mapping field
>   	 * may still contain a (stale) mapping value. For example, the
> -	 * lower bits of page->mapping may still identify the page as an
> -	 * anonymous page. Ultimately, this entire field is just stale
> -	 * and wrong, and it will cause errors if not cleared.  One
> -	 * example is:
> -	 *
> -	 *  migrate_vma_pages()
> -	 *    migrate_vma_insert_page()
> -	 *      page_add_new_anon_rmap()
> -	 *        __page_set_anon_rmap()
> -	 *          ...checks page->mapping, via PageAnon(page) call,
> -	 *            and incorrectly concludes that the page is an
> -	 *            anonymous page. Therefore, it incorrectly,
> -	 *            silently fails to set up the new anon rmap.
> +	 * lower bits of folio->mapping may still identify the folio as an
> +	 * anonymous folio. Ultimately, this entire field is just stale
> +	 * and wrong, and it will cause errors if not cleared.
>   	 *
>   	 * For other types of ZONE_DEVICE pages, migration is either
>   	 * handled differently or not done at all, so there is no need

Reviewed-by: David Hildenbrand <david@redhat.com>

-- 
Cheers,

David / dhildenb




* Re: [PATCH 6/9] mm: Remove references to page_add_new_anon_rmap in comments
  2023-12-11 16:22 ` [PATCH 6/9] mm: Remove references to page_add_new_anon_rmap in comments Matthew Wilcox (Oracle)
@ 2023-12-12 13:20   ` David Hildenbrand
  0 siblings, 0 replies; 23+ messages in thread
From: David Hildenbrand @ 2023-12-12 13:20 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle), Andrew Morton; +Cc: linux-mm

On 11.12.23 17:22, Matthew Wilcox (Oracle) wrote:
> Refer to folio_add_new_anon_rmap() instead.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   mm/rmap.c | 4 ++--
>   1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/mm/rmap.c b/mm/rmap.c
> index de9426ad0f1b..f3d49ec197ef 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1231,9 +1231,9 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
>   	 * We have exclusion against page_add_anon_rmap because the caller
>   	 * always holds the page locked.
>   	 *
> -	 * We have exclusion against page_add_new_anon_rmap because those pages
> +	 * We have exclusion against folio_add_new_anon_rmap because those pages
>   	 * are initially only visible via the pagetables, and the pte is locked
> -	 * over the call to page_add_new_anon_rmap.
> +	 * over the call to folio_add_new_anon_rmap.
>   	 */
>   	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
>   			folio);

Reviewed-by: David Hildenbrand <david@redhat.com>

-- 
Cheers,

David / dhildenb




* Re: [PATCH 7/9] mm: Convert migrate_vma_insert_page() to use a folio
  2023-12-11 16:22 ` [PATCH 7/9] mm: Convert migrate_vma_insert_page() to use a folio Matthew Wilcox (Oracle)
  2023-12-11 22:17   ` Alistair Popple
@ 2023-12-12 13:21   ` David Hildenbrand
  1 sibling, 0 replies; 23+ messages in thread
From: David Hildenbrand @ 2023-12-12 13:21 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle), Andrew Morton; +Cc: linux-mm

On 11.12.23 17:22, Matthew Wilcox (Oracle) wrote:
> Replaces five calls to compound_head() with one.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   mm/migrate_device.c | 23 ++++++++++++-----------
>   1 file changed, 12 insertions(+), 11 deletions(-)
> 
> diff --git a/mm/migrate_device.c b/mm/migrate_device.c
> index 8ac1f79f754a..81193363f8cd 100644
> --- a/mm/migrate_device.c
> +++ b/mm/migrate_device.c
> @@ -564,6 +564,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
>   				    struct page *page,
>   				    unsigned long *src)
>   {
> +	struct folio *folio = page_folio(page);
>   	struct vm_area_struct *vma = migrate->vma;
>   	struct mm_struct *mm = vma->vm_mm;
>   	bool flush = false;
> @@ -596,17 +597,17 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
>   		goto abort;
>   	if (unlikely(anon_vma_prepare(vma)))
>   		goto abort;
> -	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
> +	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
>   		goto abort;
>   
>   	/*
> -	 * The memory barrier inside __SetPageUptodate makes sure that
> -	 * preceding stores to the page contents become visible before
> +	 * The memory barrier inside __folio_mark_uptodate makes sure that
> +	 * preceding stores to the folio contents become visible before
>   	 * the set_pte_at() write.
>   	 */
> -	__SetPageUptodate(page);
> +	__folio_mark_uptodate(folio);
>   
> -	if (is_device_private_page(page)) {
> +	if (folio_is_device_private(folio)) {
>   		swp_entry_t swp_entry;
>   
>   		if (vma->vm_flags & VM_WRITE)
> @@ -617,8 +618,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
>   						page_to_pfn(page));
>   		entry = swp_entry_to_pte(swp_entry);
>   	} else {
> -		if (is_zone_device_page(page) &&
> -		    !is_device_coherent_page(page)) {
> +		if (folio_is_zone_device(folio) &&
> +		    !folio_is_device_coherent(folio)) {
>   			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
>   			goto abort;
>   		}
> @@ -652,10 +653,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
>   		goto unlock_abort;
>   
>   	inc_mm_counter(mm, MM_ANONPAGES);
> -	page_add_new_anon_rmap(page, vma, addr);
> -	if (!is_zone_device_page(page))
> -		lru_cache_add_inactive_or_unevictable(page, vma);
> -	get_page(page);
> +	folio_add_new_anon_rmap(folio, vma, addr);
> +	if (!folio_is_zone_device(folio))
> +		folio_add_lru_vma(folio, vma);
> +	folio_get(folio);
>   
>   	if (flush) {
>   		flush_cache_page(vma, addr, pte_pfn(orig_pte));

Reviewed-by: David Hildenbrand <david@redhat.com>

-- 
Cheers,

David / dhildenb




* Re: [PATCH 8/9] mm: Convert collapse_huge_page() to use a folio
  2023-12-11 16:22 ` [PATCH 8/9] mm: Convert collapse_huge_page() " Matthew Wilcox (Oracle)
@ 2023-12-12 13:21   ` David Hildenbrand
  0 siblings, 0 replies; 23+ messages in thread
From: David Hildenbrand @ 2023-12-12 13:21 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle), Andrew Morton; +Cc: linux-mm

On 11.12.23 17:22, Matthew Wilcox (Oracle) wrote:
> Replace three calls to compound_head() with one.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   mm/khugepaged.c | 15 ++++++++-------
>   1 file changed, 8 insertions(+), 7 deletions(-)
> 
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index e722f754797f..f7ec73976c38 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -1087,6 +1087,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>   	pmd_t *pmd, _pmd;
>   	pte_t *pte;
>   	pgtable_t pgtable;
> +	struct folio *folio;
>   	struct page *hpage;
>   	spinlock_t *pmd_ptl, *pte_ptl;
>   	int result = SCAN_FAIL;
> @@ -1209,13 +1210,13 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>   	if (unlikely(result != SCAN_SUCCEED))
>   		goto out_up_write;
>   
> +	folio = page_folio(hpage);
>   	/*
> -	 * spin_lock() below is not the equivalent of smp_wmb(), but
> -	 * the smp_wmb() inside __SetPageUptodate() can be reused to
> -	 * avoid the copy_huge_page writes to become visible after
> -	 * the set_pmd_at() write.
> +	 * The smp_wmb() inside __folio_mark_uptodate() ensures the
> +	 * copy_huge_page writes become visible before the set_pmd_at()
> +	 * write.
>   	 */
> -	__SetPageUptodate(hpage);
> +	__folio_mark_uptodate(folio);
>   	pgtable = pmd_pgtable(_pmd);
>   
>   	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
> @@ -1223,8 +1224,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>   
>   	spin_lock(pmd_ptl);
>   	BUG_ON(!pmd_none(*pmd));
> -	page_add_new_anon_rmap(hpage, vma, address);
> -	lru_cache_add_inactive_or_unevictable(hpage, vma);
> +	folio_add_new_anon_rmap(folio, vma, address);
> +	folio_add_lru_vma(folio, vma);
>   	pgtable_trans_huge_deposit(mm, pmd, pgtable);
>   	set_pmd_at(mm, address, pmd, _pmd);
>   	update_mmu_cache_pmd(vma, address, pmd);

Reviewed-by: David Hildenbrand <david@redhat.com>

-- 
Cheers,

David / dhildenb




* Re: [PATCH 9/9] mm: Remove page_add_new_anon_rmap and lru_cache_add_inactive_or_unevictable
  2023-12-11 16:22 ` [PATCH 9/9] mm: Remove page_add_new_anon_rmap and lru_cache_add_inactive_or_unevictable Matthew Wilcox (Oracle)
@ 2023-12-12 13:21   ` David Hildenbrand
  0 siblings, 0 replies; 23+ messages in thread
From: David Hildenbrand @ 2023-12-12 13:21 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle), Andrew Morton; +Cc: linux-mm

On 11.12.23 17:22, Matthew Wilcox (Oracle) wrote:
> All callers have now been converted to folio_add_new_anon_rmap() and
> folio_add_lru_vma() so we can remove the wrapper.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   include/linux/rmap.h |  2 --
>   include/linux/swap.h |  3 ---
>   mm/folio-compat.c    | 16 ----------------
>   3 files changed, 21 deletions(-)
> 
> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> index af6a32b6f3e7..0ae2bb0e77f5 100644
> --- a/include/linux/rmap.h
> +++ b/include/linux/rmap.h
> @@ -197,8 +197,6 @@ typedef int __bitwise rmap_t;
>   void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
>   void page_add_anon_rmap(struct page *, struct vm_area_struct *,
>   		unsigned long address, rmap_t flags);
> -void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
> -		unsigned long address);
>   void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
>   		unsigned long address);
>   void page_add_file_rmap(struct page *, struct vm_area_struct *,
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 4f25b1237364..edc0f2c8ce01 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -397,9 +397,6 @@ void folio_deactivate(struct folio *folio);
>   void folio_mark_lazyfree(struct folio *folio);
>   extern void swap_setup(void);
>   
> -extern void lru_cache_add_inactive_or_unevictable(struct page *page,
> -						struct vm_area_struct *vma);
> -
>   /* linux/mm/vmscan.c */
>   extern unsigned long zone_reclaimable_pages(struct zone *zone);
>   extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
> diff --git a/mm/folio-compat.c b/mm/folio-compat.c
> index aee3b9a16828..50412014f16f 100644
> --- a/mm/folio-compat.c
> +++ b/mm/folio-compat.c
> @@ -77,12 +77,6 @@ bool redirty_page_for_writepage(struct writeback_control *wbc,
>   }
>   EXPORT_SYMBOL(redirty_page_for_writepage);
>   
> -void lru_cache_add_inactive_or_unevictable(struct page *page,
> -		struct vm_area_struct *vma)
> -{
> -	folio_add_lru_vma(page_folio(page), vma);
> -}
> -
>   int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
>   		pgoff_t index, gfp_t gfp)
>   {
> @@ -122,13 +116,3 @@ void putback_lru_page(struct page *page)
>   {
>   	folio_putback_lru(page_folio(page));
>   }
> -
> -#ifdef CONFIG_MMU
> -void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
> -		unsigned long address)
> -{
> -	VM_BUG_ON_PAGE(PageTail(page), page);
> -
> -	return folio_add_new_anon_rmap((struct folio *)page, vma, address);
> -}
> -#endif

Reviewed-by: David Hildenbrand <david@redhat.com>

-- 
Cheers,

David / dhildenb




* Re: [PATCH 2/9] mm: Simplify the assertions in unuse_pte()
  2023-12-12 12:26   ` David Hildenbrand
@ 2023-12-12 13:52     ` Matthew Wilcox
  2023-12-12 13:55       ` David Hildenbrand
  0 siblings, 1 reply; 23+ messages in thread
From: Matthew Wilcox @ 2023-12-12 13:52 UTC (permalink / raw)
  To: David Hildenbrand; +Cc: Andrew Morton, linux-mm

On Tue, Dec 12, 2023 at 01:26:35PM +0100, David Hildenbrand wrote:
> On 11.12.23 17:22, Matthew Wilcox (Oracle) wrote:
> > We should only see anon folios in this function (and there are many
> > assumptions of that already), so we can simplify these two assertions.
> 
> If we swapped in a fresh page, it is not PageAnon before we do the
> page_add_anon_rmap() call.
> 
> So I'm pretty sure this is wrong.

Argh, yes.

What do you think to just dropping the assertions altogether?  You
added them in 78fbe906cc90 as part of general paranoia about using
an existing flag for a new purpose.  I think they've now served their
purpose and can go away.

Perhaps simply:

	VM_BUG_ON_PAGE(PageAnonExclusive(page), page);



* Re: [PATCH 2/9] mm: Simplify the assertions in unuse_pte()
  2023-12-12 13:52     ` Matthew Wilcox
@ 2023-12-12 13:55       ` David Hildenbrand
  0 siblings, 0 replies; 23+ messages in thread
From: David Hildenbrand @ 2023-12-12 13:55 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: Andrew Morton, linux-mm

On 12.12.23 14:52, Matthew Wilcox wrote:
> On Tue, Dec 12, 2023 at 01:26:35PM +0100, David Hildenbrand wrote:
>> On 11.12.23 17:22, Matthew Wilcox (Oracle) wrote:
>>> We should only see anon folios in this function (and there are many
>>> assumptions of that already), so we can simplify these two assertions.
>>
>> If we swapped in a fresh page, it is not PageAnon before we do the
>> page_add_anon_rmap() call.
>>
>> So I'm pretty sure this is wrong.
> 
> Argh, yes.
> 
> What do you think to just dropping the assertions altogether?  You
> added them in 78fbe906cc90 as part of general paranoia about using
> an existing flag for a new purpose.  I think they've now served their
> purpose and can go away.

At least the ones here, yes. I mean, unuse_pte() is a very corner case 
feature either way.

> 
> Perhaps simply:
> 
> 	VM_BUG_ON_PAGE(PageAnonExclusive(page), page);

If we refault a page from the swapcache, it would already be PageAnon.

Maybe just drop the assertions completely in unuse_pte().

-- 
Cheers,

David / dhildenb




Thread overview: 23+ messages
2023-12-11 16:22 [PATCH 0/9] Finish two folio conversions Matthew Wilcox (Oracle)
2023-12-11 16:22 ` [PATCH 1/9] mm: Convert ksm_might_need_to_copy() to work on folios Matthew Wilcox (Oracle)
2023-12-12 12:32   ` David Hildenbrand
2023-12-12 12:43     ` Matthew Wilcox
2023-12-12 12:43       ` David Hildenbrand
2023-12-11 16:22 ` [PATCH 2/9] mm: Simplify the assertions in unuse_pte() Matthew Wilcox (Oracle)
2023-12-12 12:26   ` David Hildenbrand
2023-12-12 13:52     ` Matthew Wilcox
2023-12-12 13:55       ` David Hildenbrand
2023-12-11 16:22 ` [PATCH 3/9] mm: Convert unuse_pte() to use a folio throughout Matthew Wilcox (Oracle)
2023-12-11 16:22 ` [PATCH 4/9] mm: Remove some calls to page_add_new_anon_rmap() Matthew Wilcox (Oracle)
2023-12-12 13:20   ` David Hildenbrand
2023-12-11 16:22 ` [PATCH 5/9] mm: Remove stale example from comment Matthew Wilcox (Oracle)
2023-12-12 13:20   ` David Hildenbrand
2023-12-11 16:22 ` [PATCH 6/9] mm: Remove references to page_add_new_anon_rmap in comments Matthew Wilcox (Oracle)
2023-12-12 13:20   ` David Hildenbrand
2023-12-11 16:22 ` [PATCH 7/9] mm: Convert migrate_vma_insert_page() to use a folio Matthew Wilcox (Oracle)
2023-12-11 22:17   ` Alistair Popple
2023-12-12 13:21   ` David Hildenbrand
2023-12-11 16:22 ` [PATCH 8/9] mm: Convert collapse_huge_page() " Matthew Wilcox (Oracle)
2023-12-12 13:21   ` David Hildenbrand
2023-12-11 16:22 ` [PATCH 9/9] mm: Remove page_add_new_anon_rmap and lru_cache_add_inactive_or_unevictable Matthew Wilcox (Oracle)
2023-12-12 13:21   ` David Hildenbrand
