linux-mm.kvack.org archive mirror
* [PATCH] mm/rmap: convert __page_{set, check}_anon_rmap() to folios
@ 2023-04-12  2:18 Yajun Deng
  2023-04-12  8:43 ` David Hildenbrand
  2023-04-12 13:04 ` Matthew Wilcox
From: Yajun Deng @ 2023-04-12  2:18 UTC
  To: akpm; +Cc: linux-mm, linux-kernel, Yajun Deng

The second parameter of __page_set_anon_rmap(), the page, is only used
by SetPageAnonExclusive(), so there is no need to call
SetPageAnonExclusive() inside __page_set_anon_rmap().

Move SetPageAnonExclusive() out of __page_set_anon_rmap() into the
callers, and rename __page_set_anon_rmap() to __folio_set_anon_rmap().

Since __page_check_anon_rmap() only needs the folio, pass it directly
and rename the function to __folio_check_anon_rmap().

Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
---
 mm/rmap.c | 54 ++++++++++++++++++++++++++++--------------------------
 1 file changed, 28 insertions(+), 26 deletions(-)
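
For context, a minimal before/after sketch of the common call site
pattern (simplified from page_add_anon_rmap() in the diff below; the
surrounding kernel context is omitted):

	/* Before: the helper also set the exclusive bit itself. */
	__page_set_anon_rmap(folio, page, vma, address,
			     !!(flags & RMAP_EXCLUSIVE));

	/*
	 * After: the helper only initializes folio->mapping and
	 * folio->index; the caller marks the exact page exclusive.
	 */
	__folio_set_anon_rmap(folio, vma, address, exclusive);
	if (exclusive)
		SetPageAnonExclusive(page);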

diff --git a/mm/rmap.c b/mm/rmap.c
index ba901c416785..8dc71caa208f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1129,14 +1129,13 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 }
 
 /**
- * __page_set_anon_rmap - set up new anonymous rmap
- * @folio:	Folio which contains page.
- * @page:	Page to add to rmap.
+ * __folio_set_anon_rmap - set up new anonymous rmap
+ * @folio:	Folio to add to rmap.
  * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
  * @exclusive:	the page is exclusively owned by the current process
  */
-static void __page_set_anon_rmap(struct folio *folio, struct page *page,
+static void __folio_set_anon_rmap(struct folio *folio,
 	struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
@@ -1144,7 +1143,7 @@ static void __page_set_anon_rmap(struct folio *folio, struct page *page,
 	BUG_ON(!anon_vma);
 
 	if (folio_test_anon(folio))
-		goto out;
+		return;
 
 	/*
 	 * If the page isn't exclusively mapped into this vma,
@@ -1163,21 +1162,17 @@ static void __page_set_anon_rmap(struct folio *folio, struct page *page,
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
 	folio->index = linear_page_index(vma, address);
-out:
-	if (exclusive)
-		SetPageAnonExclusive(page);
 }
 
 /**
- * __page_check_anon_rmap - sanity check anonymous rmap addition
- * @page:	the page to add the mapping to
+ * __folio_check_anon_rmap - sanity check anonymous rmap addition
+ * @folio:	the folio to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  */
-static void __page_check_anon_rmap(struct page *page,
+static void __folio_check_anon_rmap(struct folio *folio,
 	struct vm_area_struct *vma, unsigned long address)
 {
-	struct folio *folio = page_folio(page);
 	/*
 	 * The page's anon-rmap details (mapping and index) are guaranteed to
 	 * be set up correctly at this point.
@@ -1191,8 +1186,8 @@ static void __page_check_anon_rmap(struct page *page,
 	 */
 	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
 			folio);
-	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
-		       page);
+	VM_BUG_ON_FOLIO(folio_pgoff(folio) != linear_page_index(vma, address),
+		       folio);
 }
 
 /**
@@ -1214,6 +1209,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 	atomic_t *mapped = &folio->_nr_pages_mapped;
 	int nr = 0, nr_pmdmapped = 0;
 	bool compound = flags & RMAP_COMPOUND;
+	bool exclusive = !!(flags & RMAP_EXCLUSIVE);
 	bool first = true;
 
 	/* Is page being mapped by PTE? Is this its first map to be added? */
@@ -1243,7 +1239,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 		}
 	}
 
-	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
+	VM_BUG_ON_PAGE(!first && exclusive, page);
 	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
 
 	if (nr_pmdmapped)
@@ -1253,11 +1249,12 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 
 	if (likely(!folio_test_ksm(folio))) {
 		/* address might be in next vma when migration races vma_merge */
-		if (first)
-			__page_set_anon_rmap(folio, page, vma, address,
-					     !!(flags & RMAP_EXCLUSIVE));
-		else
-			__page_check_anon_rmap(page, vma, address);
+		if (first) {
+			__folio_set_anon_rmap(folio, vma, address, exclusive);
+			if (exclusive)
+				SetPageAnonExclusive(page);
+		} else
+			__folio_check_anon_rmap(folio, vma, address);
 	}
 
 	mlock_vma_folio(folio, vma, compound);
@@ -1297,7 +1294,8 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	}
 
 	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
-	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
+	__folio_set_anon_rmap(folio, vma, address, 1);
+	SetPageAnonExclusive(&folio->page);
 }
 
 /**
@@ -2533,17 +2531,20 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 {
 	struct folio *folio = page_folio(page);
 	struct anon_vma *anon_vma = vma->anon_vma;
+	bool exclusive = !!(flags & RMAP_EXCLUSIVE);
 	int first;
 
 	BUG_ON(!folio_test_locked(folio));
 	BUG_ON(!anon_vma);
 	/* address might be in next vma when migration races vma_merge */
 	first = atomic_inc_and_test(&folio->_entire_mapcount);
-	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
+	VM_BUG_ON_PAGE(!first && exclusive, page);
 	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
-	if (first)
-		__page_set_anon_rmap(folio, page, vma, address,
-				     !!(flags & RMAP_EXCLUSIVE));
+	if (first) {
+		__folio_set_anon_rmap(folio, vma, address, exclusive);
+		if (exclusive)
+			SetPageAnonExclusive(page);
+	}
 }
 
 void hugepage_add_new_anon_rmap(struct folio *folio,
@@ -2553,6 +2554,7 @@ void hugepage_add_new_anon_rmap(struct folio *folio,
 	/* increment count (starts at -1) */
 	atomic_set(&folio->_entire_mapcount, 0);
 	folio_clear_hugetlb_restore_reserve(folio);
-	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
+	__folio_set_anon_rmap(folio, vma, address, 1);
+	SetPageAnonExclusive(&folio->page);
 }
 #endif /* CONFIG_HUGETLB_PAGE */
-- 
2.25.1




* Re: [PATCH] mm/rmap: convert __page_{set, check}_anon_rmap() to folios
  2023-04-12  2:18 [PATCH] mm/rmap: convert __page_{set, check}_anon_rmap() to folios Yajun Deng
@ 2023-04-12  8:43 ` David Hildenbrand
  2023-04-12 13:04 ` Matthew Wilcox
From: David Hildenbrand @ 2023-04-12  8:43 UTC
  To: Yajun Deng, akpm; +Cc: linux-mm, linux-kernel

On 12.04.23 04:18, Yajun Deng wrote:
> The second parameter of __page_set_anon_rmap(), the page, is only used
> by SetPageAnonExclusive(), so there is no need to call
> SetPageAnonExclusive() inside __page_set_anon_rmap().
> 
> Move SetPageAnonExclusive() out of __page_set_anon_rmap() into the
> callers, and rename __page_set_anon_rmap() to __folio_set_anon_rmap().
> 
> Since __page_check_anon_rmap() only needs the folio, pass it directly
> and rename the function to __folio_check_anon_rmap().
> 

Leaking SetPageAnonExclusive() into all callers is not an improvement.
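
Concretely, every caller now has to pair the helper with its own
SetPageAnonExclusive() call, e.g. in folio_add_new_anon_rmap():

	__folio_set_anon_rmap(folio, vma, address, 1);
	SetPageAnonExclusive(&folio->page);

and the same two-step pattern repeats in page_add_anon_rmap(),
hugepage_add_anon_rmap() and hugepage_add_new_anon_rmap(), so the
invariant lives in four places instead of one.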

Not convinced we want this patch TBH. Tentative NAK.

-- 
Thanks,

David / dhildenb




* Re: [PATCH] mm/rmap: convert __page_{set, check}_anon_rmap() to folios
  2023-04-12  2:18 [PATCH] mm/rmap: convert __page_{set, check}_anon_rmap() to folios Yajun Deng
  2023-04-12  8:43 ` David Hildenbrand
@ 2023-04-12 13:04 ` Matthew Wilcox
From: Matthew Wilcox @ 2023-04-12 13:04 UTC
  To: Yajun Deng; +Cc: akpm, linux-mm, linux-kernel

On Wed, Apr 12, 2023 at 10:18:11AM +0800, Yajun Deng wrote:
> The second parameter of __page_set_anon_rmap(), the page, is only used
> by SetPageAnonExclusive(), so there is no need to call
> SetPageAnonExclusive() inside __page_set_anon_rmap().
> 
> Move SetPageAnonExclusive() out of __page_set_anon_rmap() into the
> callers, and rename __page_set_anon_rmap() to __folio_set_anon_rmap().
> 
> Since __page_check_anon_rmap() only needs the folio, pass it directly
> and rename the function to __folio_check_anon_rmap().

I do not think this is an improvement.


