* [PATCH -next v3 0/7] mm: convert page_idle/damon to use folios
@ 2022-12-28 11:34 Kefeng Wang
  2022-12-28 11:34 ` [PATCH -next v3 1/7] mm: page_idle: convert page idle " Kefeng Wang
                   ` (7 more replies)
  0 siblings, 8 replies; 19+ messages in thread
From: Kefeng Wang @ 2022-12-28 11:34 UTC (permalink / raw)
  To: Andrew Morton, SeongJae Park
  Cc: damon, linux-mm, vishal.moola, willy, david, Kefeng Wang

v3:
- more thorough conversion in page_idle_get_folio(), as suggested by Matthew;
  also do the same in damon_get_folio().
- address some comments from SeongJae

v2:
- drop unsafe pfn_to_online_folio(), convert page_idle_get_page() and
  damon_get_page() to return a folio after grabbing a reference
- convert damon hugetlb related functions
- rebased on next-20221226.

Kefeng Wang (7):
  mm: page_idle: Convert page idle to use folios
  mm/damon: introduce damon_get_folio()
  mm/damon: convert damon_ptep/pmdp_mkold() to use folios
  mm/damon: paddr: convert damon_pa_*() to use folios
  mm/damon: vaddr: convert damon_young_pmd_entry() to use folio
  mm/damon: remove unneed damon_get_page()
  mm/damon: vaddr: convert hugetlb related function to use folios

 mm/damon/ops-common.c | 38 +++++++++++++++-------------
 mm/damon/ops-common.h |  2 +-
 mm/damon/paddr.c      | 58 +++++++++++++++++++------------------------
 mm/damon/vaddr.c      | 38 ++++++++++++++--------------
 mm/page_idle.c        | 47 ++++++++++++++++++-----------------
 5 files changed, 91 insertions(+), 92 deletions(-)

-- 
2.35.3



* [PATCH -next v3 1/7] mm: page_idle: convert page idle to use folios
  2022-12-28 11:34 [PATCH -next v3 0/7] mm: convert page_idle/damon to use folios Kefeng Wang
@ 2022-12-28 11:34 ` Kefeng Wang
  2022-12-28 17:46   ` SeongJae Park
  2022-12-28 11:34 ` [PATCH -next v3 2/7] mm/damon: introduce damon_get_folio() Kefeng Wang
                   ` (6 subsequent siblings)
  7 siblings, 1 reply; 19+ messages in thread
From: Kefeng Wang @ 2022-12-28 11:34 UTC (permalink / raw)
  To: Andrew Morton, SeongJae Park
  Cc: damon, linux-mm, vishal.moola, willy, david, Kefeng Wang

Firtly, make page_idle_get_page() return a folio, also rename it to
page_idle_get_folio(), then, use it to covert page_idle_bitmap_read()
and page_idle_bitmap_write() functions.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/page_idle.c | 47 ++++++++++++++++++++++++-----------------------
 1 file changed, 24 insertions(+), 23 deletions(-)

diff --git a/mm/page_idle.c b/mm/page_idle.c
index bc08332a609c..41ea77f22011 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -31,19 +31,22 @@
  *
  * This function tries to get a user memory page by pfn as described above.
  */
-static struct page *page_idle_get_page(unsigned long pfn)
+static struct folio *page_idle_get_folio(unsigned long pfn)
 {
 	struct page *page = pfn_to_online_page(pfn);
+	struct folio *folio;
 
-	if (!page || !PageLRU(page) ||
-	    !get_page_unless_zero(page))
+	if (!page || PageTail(page))
 		return NULL;
 
-	if (unlikely(!PageLRU(page))) {
-		put_page(page);
-		page = NULL;
+	folio = page_folio(page);
+	if (!folio_test_lru(folio) || !folio_try_get(folio))
+		return NULL;
+	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
+		folio_put(folio);
+		folio = NULL;
 	}
-	return page;
+	return folio;
 }
 
 static bool page_idle_clear_pte_refs_one(struct folio *folio,
@@ -83,10 +86,8 @@ static bool page_idle_clear_pte_refs_one(struct folio *folio,
 	return true;
 }
 
-static void page_idle_clear_pte_refs(struct page *page)
+static void page_idle_clear_pte_refs(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-
 	/*
 	 * Since rwc.try_lock is unused, rwc is effectively immutable, so we
 	 * can make it static to save some cycles and stack.
@@ -115,7 +116,7 @@ static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
 				     loff_t pos, size_t count)
 {
 	u64 *out = (u64 *)buf;
-	struct page *page;
+	struct folio *folio;
 	unsigned long pfn, end_pfn;
 	int bit;
 
@@ -134,19 +135,19 @@ static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
 		bit = pfn % BITMAP_CHUNK_BITS;
 		if (!bit)
 			*out = 0ULL;
-		page = page_idle_get_page(pfn);
-		if (page) {
-			if (page_is_idle(page)) {
+		folio = page_idle_get_folio(pfn);
+		if (folio) {
+			if (folio_test_idle(folio)) {
 				/*
 				 * The page might have been referenced via a
 				 * pte, in which case it is not idle. Clear
 				 * refs and recheck.
 				 */
-				page_idle_clear_pte_refs(page);
-				if (page_is_idle(page))
+				page_idle_clear_pte_refs(folio);
+				if (folio_test_idle(folio))
 					*out |= 1ULL << bit;
 			}
-			put_page(page);
+			folio_put(folio);
 		}
 		if (bit == BITMAP_CHUNK_BITS - 1)
 			out++;
@@ -160,7 +161,7 @@ static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
 				      loff_t pos, size_t count)
 {
 	const u64 *in = (u64 *)buf;
-	struct page *page;
+	struct folio *folio;
 	unsigned long pfn, end_pfn;
 	int bit;
 
@@ -178,11 +179,11 @@ static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
 	for (; pfn < end_pfn; pfn++) {
 		bit = pfn % BITMAP_CHUNK_BITS;
 		if ((*in >> bit) & 1) {
-			page = page_idle_get_page(pfn);
-			if (page) {
-				page_idle_clear_pte_refs(page);
-				set_page_idle(page);
-				put_page(page);
+			folio = page_idle_get_folio(pfn);
+			if (folio) {
+				page_idle_clear_pte_refs(folio);
+				folio_set_idle(folio);
+				folio_put(folio);
 			}
 		}
 		if (bit == BITMAP_CHUNK_BITS - 1)
-- 
2.35.3



* [PATCH -next v3 2/7] mm/damon: introduce damon_get_folio()
  2022-12-28 11:34 [PATCH -next v3 0/7] mm: convert page_idle/damon to use folios Kefeng Wang
  2022-12-28 11:34 ` [PATCH -next v3 1/7] mm: page_idle: convert page idle " Kefeng Wang
@ 2022-12-28 11:34 ` Kefeng Wang
  2022-12-28 22:36   ` Matthew Wilcox
  2022-12-28 11:34 ` [PATCH -next v3 3/7] mm/damon: convert damon_ptep/pmdp_mkold() to use folios Kefeng Wang
                   ` (5 subsequent siblings)
  7 siblings, 1 reply; 19+ messages in thread
From: Kefeng Wang @ 2022-12-28 11:34 UTC (permalink / raw)
  To: Andrew Morton, SeongJae Park
  Cc: damon, linux-mm, vishal.moola, willy, david, Kefeng Wang

Introduce damon_get_folio(), along with a temporary wrapper function
damon_get_page(), to help convert DAMON-related functions to use folios;
the wrapper will be dropped once the conversion is completed.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/damon/ops-common.c | 18 +++++++++++-------
 mm/damon/ops-common.h |  9 ++++++++-
 2 files changed, 19 insertions(+), 8 deletions(-)

diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index 75409601f934..1294a256a87c 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -16,21 +16,25 @@
  * Get an online page for a pfn if it's in the LRU list.  Otherwise, returns
  * NULL.
  *
- * The body of this function is stolen from the 'page_idle_get_page()'.  We
+ * The body of this function is stolen from the 'page_idle_get_folio()'.  We
  * steal rather than reuse it because the code is quite simple.
  */
-struct page *damon_get_page(unsigned long pfn)
+struct folio *damon_get_folio(unsigned long pfn)
 {
 	struct page *page = pfn_to_online_page(pfn);
+	struct folio *folio;
 
-	if (!page || !PageLRU(page) || !get_page_unless_zero(page))
+	if (!page || PageTail(page))
 		return NULL;
 
-	if (unlikely(!PageLRU(page))) {
-		put_page(page);
-		page = NULL;
+	folio = page_folio(page);
+	if (!folio_test_lru(folio) || !folio_try_get(folio))
+		return NULL;
+	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
+		folio_put(folio);
+		folio = NULL;
 	}
-	return page;
+	return folio;
 }
 
 void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr)
diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h
index 8d82d3722204..65f290f0a9d6 100644
--- a/mm/damon/ops-common.h
+++ b/mm/damon/ops-common.h
@@ -7,7 +7,14 @@
 
 #include <linux/damon.h>
 
-struct page *damon_get_page(unsigned long pfn);
+struct folio *damon_get_folio(unsigned long pfn);
+static inline struct page *damon_get_page(unsigned long pfn)
+{
+	struct folio *folio = damon_get_folio(pfn);
+
+	/* when folio is NULL, return &(0->page) mean return NULL */
+	return &folio->page;
+}
 
 void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr);
 void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr);
-- 
2.35.3



* [PATCH -next v3 3/7] mm/damon: convert damon_ptep/pmdp_mkold() to use folios
  2022-12-28 11:34 [PATCH -next v3 0/7] mm: convert page_idle/damon to use folios Kefeng Wang
  2022-12-28 11:34 ` [PATCH -next v3 1/7] mm: page_idle: convert page idle " Kefeng Wang
  2022-12-28 11:34 ` [PATCH -next v3 2/7] mm/damon: introduce damon_get_folio() Kefeng Wang
@ 2022-12-28 11:34 ` Kefeng Wang
  2022-12-28 11:34 ` [PATCH -next v3 4/7] mm/damon/paddr: convert damon_pa_*() " Kefeng Wang
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 19+ messages in thread
From: Kefeng Wang @ 2022-12-28 11:34 UTC (permalink / raw)
  To: Andrew Morton, SeongJae Park
  Cc: damon, linux-mm, vishal.moola, willy, david, Kefeng Wang

With damon_get_folio(), let's convert damon_ptep_mkold() and
damon_pmdp_mkold() to use folios.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/damon/ops-common.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index 1294a256a87c..cc63cf953636 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -40,9 +40,9 @@ struct folio *damon_get_folio(unsigned long pfn)
 void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr)
 {
 	bool referenced = false;
-	struct page *page = damon_get_page(pte_pfn(*pte));
+	struct folio *folio = damon_get_folio(pte_pfn(*pte));
 
-	if (!page)
+	if (!folio)
 		return;
 
 	if (pte_young(*pte)) {
@@ -56,19 +56,19 @@ void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr)
 #endif /* CONFIG_MMU_NOTIFIER */
 
 	if (referenced)
-		set_page_young(page);
+		folio_set_young(folio);
 
-	set_page_idle(page);
-	put_page(page);
+	folio_set_idle(folio);
+	folio_put(folio);
 }
 
 void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	bool referenced = false;
-	struct page *page = damon_get_page(pmd_pfn(*pmd));
+	struct folio *folio = damon_get_folio(pmd_pfn(*pmd));
 
-	if (!page)
+	if (!folio)
 		return;
 
 	if (pmd_young(*pmd)) {
@@ -82,10 +82,10 @@ void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr)
 #endif /* CONFIG_MMU_NOTIFIER */
 
 	if (referenced)
-		set_page_young(page);
+		folio_set_young(folio);
 
-	set_page_idle(page);
-	put_page(page);
+	folio_set_idle(folio);
+	folio_put(folio);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 }
 
-- 
2.35.3



* [PATCH -next v3 4/7] mm/damon/paddr: convert damon_pa_*() to use folios
  2022-12-28 11:34 [PATCH -next v3 0/7] mm: convert page_idle/damon to use folios Kefeng Wang
                   ` (2 preceding siblings ...)
  2022-12-28 11:34 ` [PATCH -next v3 3/7] mm/damon: convert damon_ptep/pmdp_mkold() to use folios Kefeng Wang
@ 2022-12-28 11:34 ` Kefeng Wang
  2022-12-29 20:36   ` Matthew Wilcox
  2022-12-28 11:34 ` [PATCH -next v3 5/7] mm/damon/vaddr: convert damon_young_pmd_entry() to use folio Kefeng Wang
                   ` (3 subsequent siblings)
  7 siblings, 1 reply; 19+ messages in thread
From: Kefeng Wang @ 2022-12-28 11:34 UTC (permalink / raw)
  To: Andrew Morton, SeongJae Park
  Cc: damon, linux-mm, vishal.moola, willy, david, Kefeng Wang

With damon_get_folio(), let's convert all the damon_pa_*() to use folios.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/damon/paddr.c | 58 ++++++++++++++++++++++--------------------------
 1 file changed, 26 insertions(+), 32 deletions(-)

diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 6334c99e5152..fbfd66199f3f 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -33,17 +33,15 @@ static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
 
 static void damon_pa_mkold(unsigned long paddr)
 {
-	struct folio *folio;
-	struct page *page = damon_get_page(PHYS_PFN(paddr));
+	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
 	struct rmap_walk_control rwc = {
 		.rmap_one = __damon_pa_mkold,
 		.anon_lock = folio_lock_anon_vma_read,
 	};
 	bool need_lock;
 
-	if (!page)
+	if (!folio)
 		return;
-	folio = page_folio(page);
 
 	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
 		folio_set_idle(folio);
@@ -122,8 +120,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 
 static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
 {
-	struct folio *folio;
-	struct page *page = damon_get_page(PHYS_PFN(paddr));
+	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
 	struct damon_pa_access_chk_result result = {
 		.page_sz = PAGE_SIZE,
 		.accessed = false,
@@ -135,9 +132,8 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
 	};
 	bool need_lock;
 
-	if (!page)
+	if (!folio)
 		return false;
-	folio = page_folio(page);
 
 	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
 		if (folio_test_idle(folio))
@@ -203,18 +199,18 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
 }
 
 static bool __damos_pa_filter_out(struct damos_filter *filter,
-		struct page *page)
+		struct folio *folio)
 {
 	bool matched = false;
 	struct mem_cgroup *memcg;
 
 	switch (filter->type) {
 	case DAMOS_FILTER_TYPE_ANON:
-		matched = PageAnon(page);
+		matched = folio_test_anon(folio);
 		break;
 	case DAMOS_FILTER_TYPE_MEMCG:
 		rcu_read_lock();
-		memcg = page_memcg_check(page);
+		memcg = page_memcg_check(folio_page(folio, 0));
 		if (!memcg)
 			matched = false;
 		else
@@ -231,12 +227,12 @@ static bool __damos_pa_filter_out(struct damos_filter *filter,
 /*
  * damos_pa_filter_out - Return true if the page should be filtered out.
  */
-static bool damos_pa_filter_out(struct damos *scheme, struct page *page)
+static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
 {
 	struct damos_filter *filter;
 
 	damos_for_each_filter(filter, scheme) {
-		if (__damos_pa_filter_out(filter, page))
+		if (__damos_pa_filter_out(filter, folio))
 			return true;
 	}
 	return false;
@@ -245,33 +241,33 @@ static bool damos_pa_filter_out(struct damos *scheme, struct page *page)
 static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
 {
 	unsigned long addr, applied;
-	LIST_HEAD(page_list);
+	LIST_HEAD(folio_list);
 
 	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
-		struct page *page = damon_get_page(PHYS_PFN(addr));
+		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
 
-		if (!page)
+		if (!folio)
 			continue;
 
-		if (damos_pa_filter_out(s, page)) {
-			put_page(page);
+		if (damos_pa_filter_out(s, folio)) {
+			folio_put(folio);
 			continue;
 		}
 
-		ClearPageReferenced(page);
-		test_and_clear_page_young(page);
-		if (isolate_lru_page(page)) {
-			put_page(page);
+		folio_clear_referenced(folio);
+		folio_test_clear_young(folio);
+		if (folio_isolate_lru(folio)) {
+			folio_put(folio);
 			continue;
 		}
-		if (PageUnevictable(page)) {
-			putback_lru_page(page);
+		if (folio_test_unevictable(folio)) {
+			folio_putback_lru(folio);
 		} else {
-			list_add(&page->lru, &page_list);
-			put_page(page);
+			list_add(&folio->lru, &folio_list);
+			folio_put(folio);
 		}
 	}
-	applied = reclaim_pages(&page_list);
+	applied = reclaim_pages(&folio_list);
 	cond_resched();
 	return applied * PAGE_SIZE;
 }
@@ -282,14 +278,12 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
 	unsigned long addr, applied = 0;
 
 	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
-		struct page *page = damon_get_page(PHYS_PFN(addr));
-		struct folio *folio;
+		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
 
-		if (!page)
+		if (!folio)
 			continue;
-		folio = page_folio(page);
 
-		if (damos_pa_filter_out(s, &folio->page)) {
+		if (damos_pa_filter_out(s, folio)) {
 			folio_put(folio);
 			continue;
 		}
-- 
2.35.3



* [PATCH -next v3 5/7] mm/damon/vaddr: convert damon_young_pmd_entry() to use folio
  2022-12-28 11:34 [PATCH -next v3 0/7] mm: convert page_idle/damon to use folios Kefeng Wang
                   ` (3 preceding siblings ...)
  2022-12-28 11:34 ` [PATCH -next v3 4/7] mm/damon/paddr: convert damon_pa_*() " Kefeng Wang
@ 2022-12-28 11:34 ` Kefeng Wang
  2022-12-29 21:06   ` Matthew Wilcox
  2022-12-28 11:34 ` [PATCH -next v3 6/7] mm/damon: remove unneed damon_get_page() Kefeng Wang
                   ` (2 subsequent siblings)
  7 siblings, 1 reply; 19+ messages in thread
From: Kefeng Wang @ 2022-12-28 11:34 UTC (permalink / raw)
  To: Andrew Morton, SeongJae Park
  Cc: damon, linux-mm, vishal.moola, willy, david, Kefeng Wang

With damon_get_folio(), let's convert damon_young_pmd_entry()
to use folios.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/damon/vaddr.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 15f03df66db6..29227b7a6032 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -431,7 +431,7 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
 {
 	pte_t *pte;
 	spinlock_t *ptl;
-	struct page *page;
+	struct folio *folio;
 	struct damon_young_walk_private *priv = walk->private;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -446,16 +446,16 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
 			spin_unlock(ptl);
 			goto regular_page;
 		}
-		page = damon_get_page(pmd_pfn(*pmd));
-		if (!page)
+		folio = damon_get_folio(pmd_pfn(*pmd));
+		if (!folio)
 			goto huge_out;
-		if (pmd_young(*pmd) || !page_is_idle(page) ||
+		if (pmd_young(*pmd) || !folio_test_idle(folio) ||
 					mmu_notifier_test_young(walk->mm,
 						addr)) {
 			*priv->page_sz = HPAGE_PMD_SIZE;
 			priv->young = true;
 		}
-		put_page(page);
+		folio_put(folio);
 huge_out:
 		spin_unlock(ptl);
 		return 0;
@@ -469,15 +469,15 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	if (!pte_present(*pte))
 		goto out;
-	page = damon_get_page(pte_pfn(*pte));
-	if (!page)
+	folio = damon_get_folio(pte_pfn(*pte));
+	if (!folio)
 		goto out;
-	if (pte_young(*pte) || !page_is_idle(page) ||
+	if (pte_young(*pte) || !folio_test_idle(folio) ||
 			mmu_notifier_test_young(walk->mm, addr)) {
 		*priv->page_sz = PAGE_SIZE;
 		priv->young = true;
 	}
-	put_page(page);
+	folio_put(folio);
 out:
 	pte_unmap_unlock(pte, ptl);
 	return 0;
-- 
2.35.3



* [PATCH -next v3 6/7] mm/damon: remove unneed damon_get_page()
  2022-12-28 11:34 [PATCH -next v3 0/7] mm: convert page_idle/damon to use folios Kefeng Wang
                   ` (4 preceding siblings ...)
  2022-12-28 11:34 ` [PATCH -next v3 5/7] mm/damon/vaddr: convert damon_young_pmd_entry() to use folio Kefeng Wang
@ 2022-12-28 11:34 ` Kefeng Wang
  2022-12-28 17:49   ` SeongJae Park
  2022-12-28 11:34 ` [PATCH -next v3 7/7] mm/damon/vaddr: convert hugetlb related function to use folios Kefeng Wang
  2022-12-28 17:55 ` [PATCH -next v3 0/7] mm: convert page_idle/damon " SeongJae Park
  7 siblings, 1 reply; 19+ messages in thread
From: Kefeng Wang @ 2022-12-28 11:34 UTC (permalink / raw)
  To: Andrew Morton, SeongJae Park
  Cc: damon, linux-mm, vishal.moola, willy, david, Kefeng Wang

After all damon_get_page() callers are converted to damon_get_folio(),
remove unneed wrapper damon_get_page().

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/damon/ops-common.h | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h
index 65f290f0a9d6..14f4bc69f29b 100644
--- a/mm/damon/ops-common.h
+++ b/mm/damon/ops-common.h
@@ -8,13 +8,6 @@
 #include <linux/damon.h>
 
 struct folio *damon_get_folio(unsigned long pfn);
-static inline struct page *damon_get_page(unsigned long pfn)
-{
-	struct folio *folio = damon_get_folio(pfn);
-
-	/* when folio is NULL, return &(0->page) mean return NULL */
-	return &folio->page;
-}
 
 void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr);
 void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr);
-- 
2.35.3



* [PATCH -next v3 7/7] mm/damon/vaddr: convert hugetlb related function to use folios
  2022-12-28 11:34 [PATCH -next v3 0/7] mm: convert page_idle/damon to use folios Kefeng Wang
                   ` (5 preceding siblings ...)
  2022-12-28 11:34 ` [PATCH -next v3 6/7] mm/damon: remove unneed damon_get_page() Kefeng Wang
@ 2022-12-28 11:34 ` Kefeng Wang
  2022-12-28 17:51   ` SeongJae Park
  2022-12-28 17:55 ` [PATCH -next v3 0/7] mm: convert page_idle/damon " SeongJae Park
  7 siblings, 1 reply; 19+ messages in thread
From: Kefeng Wang @ 2022-12-28 11:34 UTC (permalink / raw)
  To: Andrew Morton, SeongJae Park
  Cc: damon, linux-mm, vishal.moola, willy, david, Kefeng Wang

Convert damon_hugetlb_mkold() and damon_young_hugetlb_entry() to
use folios.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/damon/vaddr.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 29227b7a6032..9d92c5eb3a1f 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -335,9 +335,9 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
 {
 	bool referenced = false;
 	pte_t entry = huge_ptep_get(pte);
-	struct page *page = pte_page(entry);
+	struct folio *folio = pfn_folio(pte_pfn(entry));
 
-	get_page(page);
+	folio_get(folio);
 
 	if (pte_young(entry)) {
 		referenced = true;
@@ -352,10 +352,10 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
 #endif /* CONFIG_MMU_NOTIFIER */
 
 	if (referenced)
-		set_page_young(page);
+		folio_set_young(folio);
 
-	set_page_idle(page);
-	put_page(page);
+	folio_set_idle(folio);
+	folio_put(folio);
 }
 
 static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
@@ -490,7 +490,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
 {
 	struct damon_young_walk_private *priv = walk->private;
 	struct hstate *h = hstate_vma(walk->vma);
-	struct page *page;
+	struct folio *folio;
 	spinlock_t *ptl;
 	pte_t entry;
 
@@ -499,16 +499,16 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
 	if (!pte_present(entry))
 		goto out;
 
-	page = pte_page(entry);
-	get_page(page);
+	folio = pfn_folio(pte_pfn(entry));
+	folio_get(folio);
 
-	if (pte_young(entry) || !page_is_idle(page) ||
+	if (pte_young(entry) || !folio_test_idle(folio) ||
 	    mmu_notifier_test_young(walk->mm, addr)) {
 		*priv->page_sz = huge_page_size(h);
 		priv->young = true;
 	}
 
-	put_page(page);
+	folio_put(folio);
 
 out:
 	spin_unlock(ptl);
-- 
2.35.3



* Re: [PATCH -next v3 1/7] mm: page_idle: convert page idle to use folios
  2022-12-28 11:34 ` [PATCH -next v3 1/7] mm: page_idle: convert page idle " Kefeng Wang
@ 2022-12-28 17:46   ` SeongJae Park
  0 siblings, 0 replies; 19+ messages in thread
From: SeongJae Park @ 2022-12-28 17:46 UTC (permalink / raw)
  To: Kefeng Wang
  Cc: Andrew Morton, SeongJae Park, damon, linux-mm, vishal.moola,
	willy, david

Hi Kefeng,


Thank you for your effort on this work, but I forgot to mention the below
trivial typos in the previous spin.  I should have mentioned those earlier,
sorry.

On Wed, 28 Dec 2022 19:34:07 +0800 Kefeng Wang <wangkefeng.wang@huawei.com> wrote:

> Firtly, make page_idle_get_page() return a folio, also rename it to

s/Firtly/Firstly/

> page_idle_get_folio(), then, use it to covert page_idle_bitmap_read()

s/covert/convert/

> and page_idle_bitmap_write() functions.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

Other than the trivial typos,

Reviewed-by: SeongJae Park <sj@kernel.org>


Thanks,
SJ


* Re: [PATCH -next v3 6/7] mm/damon: remove unneed damon_get_page()
  2022-12-28 11:34 ` [PATCH -next v3 6/7] mm/damon: remove unneed damon_get_page() Kefeng Wang
@ 2022-12-28 17:49   ` SeongJae Park
  0 siblings, 0 replies; 19+ messages in thread
From: SeongJae Park @ 2022-12-28 17:49 UTC (permalink / raw)
  To: Kefeng Wang
  Cc: Andrew Morton, SeongJae Park, damon, linux-mm, vishal.moola,
	willy, david

Hi Kefeng,


I forgot to mention another trivial typo in this patch earlier, sorry.

On Wed, 28 Dec 2022 19:34:12 +0800 Kefeng Wang <wangkefeng.wang@huawei.com> wrote:

> After all damon_get_page() callers are converted to damon_get_folio(),
> remove unneed wrapper damon_get_page().

s/unneed/unneeded/

For above line and the subject of this patch.

> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

Other than the trivial nit,

Reviewed-by: SeongJae Park <sj@kernel.org>


Thanks,
SJ

[...]


* Re: [PATCH -next v3 7/7] mm/damon/vaddr: convert hugetlb related function to use folios
  2022-12-28 11:34 ` [PATCH -next v3 7/7] mm/damon/vaddr: convert hugetlb related function to use folios Kefeng Wang
@ 2022-12-28 17:51   ` SeongJae Park
  0 siblings, 0 replies; 19+ messages in thread
From: SeongJae Park @ 2022-12-28 17:51 UTC (permalink / raw)
  To: Kefeng Wang
  Cc: Andrew Morton, SeongJae Park, damon, linux-mm, vishal.moola,
	willy, david

Hi Kefeng,


Yet another typo, in the subject, that I should have mentioned earlier but
forgot.  Sorry for this late comment.

s/function/functions/

On Wed, 28 Dec 2022 19:34:13 +0800 Kefeng Wang <wangkefeng.wang@huawei.com> wrote:

> Convert damon_hugetlb_mkold() and damon_young_hugetlb_entry() to
> use folios.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

Other than the trivial nit,


Reviewed-by: SeongJae Park <sj@kernel.org>


Thanks,
SJ
 
[...]


* Re: [PATCH -next v3 0/7] mm: convert page_idle/damon to use folios
  2022-12-28 11:34 [PATCH -next v3 0/7] mm: convert page_idle/damon to use folios Kefeng Wang
                   ` (6 preceding siblings ...)
  2022-12-28 11:34 ` [PATCH -next v3 7/7] mm/damon/vaddr: convert hugetlb related function to use folios Kefeng Wang
@ 2022-12-28 17:55 ` SeongJae Park
  2022-12-29  2:16   ` Kefeng Wang
  7 siblings, 1 reply; 19+ messages in thread
From: SeongJae Park @ 2022-12-28 17:55 UTC (permalink / raw)
  To: Kefeng Wang
  Cc: Andrew Morton, SeongJae Park, damon, linux-mm, vishal.moola,
	willy, david

Hi Kefeng,

On Wed, 28 Dec 2022 19:34:06 +0800 Kefeng Wang <wangkefeng.wang@huawei.com> wrote:

> v3:
> - more thorough conversion in page_idle_get_folio(), as suggested by Matthew;
>   also do the same in damon_get_folio().
> - address some comments from SeongJae
> 
> v2:
> - drop unsafe pfn_to_online_folio(), convert page_idle_get_page() and
>   damon_get_page() to return a folio after grabbing a reference
> - convert damon hugetlb related functions
> - rebased on next-20221226.
> 
> Kefeng Wang (7):
>   mm: page_idle: Convert page idle to use folios
>   mm/damon: introduce damon_get_folio()
>   mm/damon: convert damon_ptep/pmdp_mkold() to use folios
>   mm/damon: paddr: convert damon_pa_*() to use folios
>   mm/damon: vaddr: convert damon_young_pmd_entry() to use folio
>   mm/damon: remove unneed damon_get_page()
>   mm/damon: vaddr: convert hugetlb related function to use folios

Thank you so much for your efforts on this work.  I found I forgot to mention
some trivial typos in the first, the sixth, and the seventh patches, so I
commented on them.  I should have commented on those earlier, sorry.

Other than those,

Reviewed-by: SeongJae Park <sj@kernel.org>


Thanks,
SJ

> 
>  mm/damon/ops-common.c | 38 +++++++++++++++-------------
>  mm/damon/ops-common.h |  2 +-
>  mm/damon/paddr.c      | 58 +++++++++++++++++++------------------------
>  mm/damon/vaddr.c      | 38 ++++++++++++++--------------
>  mm/page_idle.c        | 47 ++++++++++++++++++-----------------
>  5 files changed, 91 insertions(+), 92 deletions(-)
> 
> -- 
> 2.35.3
> 


* Re: [PATCH -next v3 2/7] mm/damon: introduce damon_get_folio()
  2022-12-28 11:34 ` [PATCH -next v3 2/7] mm/damon: introduce damon_get_folio() Kefeng Wang
@ 2022-12-28 22:36   ` Matthew Wilcox
  2022-12-28 23:14     ` SeongJae Park
  0 siblings, 1 reply; 19+ messages in thread
From: Matthew Wilcox @ 2022-12-28 22:36 UTC (permalink / raw)
  To: Kefeng Wang
  Cc: Andrew Morton, SeongJae Park, damon, linux-mm, vishal.moola, david

On Wed, Dec 28, 2022 at 07:34:08PM +0800, Kefeng Wang wrote:
> +static inline struct page *damon_get_page(unsigned long pfn)
> +{
> +	struct folio *folio = damon_get_folio(pfn);
> +
> +	/* when folio is NULL, return &(0->page) mean return NULL */
> +	return &folio->page;

I really don't think this comment is needed.  This is the standard idiom
for converting from a folio to its head page.
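
For illustration, a minimal sketch of that idiom; this is a hypothetical
snippet, not code from this series.  'page' is the first member of
'struct folio', so taking its address through a NULL folio pointer yields
NULL without any extra branch:

	struct folio *folio = damon_get_folio(pfn);

	/* &folio->page == (struct page *)folio; a NULL folio gives a NULL page. */
	struct page *head = &folio->page;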


* Re: [PATCH -next v3 2/7] mm/damon: introduce damon_get_folio()
  2022-12-28 22:36   ` Matthew Wilcox
@ 2022-12-28 23:14     ` SeongJae Park
  0 siblings, 0 replies; 19+ messages in thread
From: SeongJae Park @ 2022-12-28 23:14 UTC (permalink / raw)
  To: Matthew Wilcox
  Cc: Kefeng Wang, Andrew Morton, SeongJae Park, damon, linux-mm,
	vishal.moola, david

Hi Matthew,

On Wed, 28 Dec 2022 22:36:20 +0000 Matthew Wilcox <willy@infradead.org> wrote:

> On Wed, Dec 28, 2022 at 07:34:08PM +0800, Kefeng Wang wrote:
> > +static inline struct page *damon_get_page(unsigned long pfn)
> > +{
> > +	struct folio *folio = damon_get_folio(pfn);
> > +
> > +	/* when folio is NULL, return &(0->page) mean return NULL */
> > +	return &folio->page;
> 
> I really don't think this comment is needed.  This is the standard idiom
> for converting from a folio to its head page.

Well, I think some DAMON code readers (at least me) might not yet be that
familiar with folios, as it has not been long since DAMON started using
them.  Also, maybe I overlooked some comments or documents, but I was unable
to be sure whether the offset of 'page' in 'folio' is intended to never
change in any future changes, or not.  So I thought adding one more line of
explanation here could be helpful for someone.

I also saw some code using 'folio_page(folio, 0)', so I might not be the only
one who at least sometimes forgets the idiom.  To help people remember this
idiom more easily, what do you think about having a new macro or static
inline function, say, 'folio_head_page()', with a comment on why passing NULL
is ok to the function?  IMHO, that would be easier to read.
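
For illustration only, a hedged sketch of the helper suggested above;
'folio_head_page()' is hypothetical and does not exist in the tree at this
point:

	/* Return the head page of @folio, or NULL if @folio is NULL. */
	static inline struct page *folio_head_page(struct folio *folio)
	{
		/* The head page is the first member of struct folio. */
		return &folio->page;
	}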


Thanks,
SJ


* Re: [PATCH -next v3 0/7] mm: convert page_idle/damon to use folios
  2022-12-28 17:55 ` [PATCH -next v3 0/7] mm: convert page_idle/damon " SeongJae Park
@ 2022-12-29  2:16   ` Kefeng Wang
  0 siblings, 0 replies; 19+ messages in thread
From: Kefeng Wang @ 2022-12-29  2:16 UTC (permalink / raw)
  To: SeongJae Park; +Cc: Andrew Morton, damon, linux-mm, vishal.moola, willy, david



On 2022/12/29 1:55, SeongJae Park wrote:
> Hi Kefeng,
> 
> On Wed, 28 Dec 2022 19:34:06 +0800 Kefeng Wang <wangkefeng.wang@huawei.com> wrote:
> 
>> v3:
>> - more thorough conversion in page_idle_get_folio(), as suggested by Matthew;
>>    also do the same in damon_get_folio().
>> - address some comments from SeongJae
>>
>> v2:
>> - drop unsafe pfn_to_online_folio(), convert page_idle_get_page() and
>>    damon_get_page() to return a folio after grabbing a reference
>> - convert damon hugetlb related functions
>> - rebased on next-20221226.
>>
>> Kefeng Wang (7):
>>    mm: page_idle: Convert page idle to use folios
>>    mm/damon: introduce damon_get_folio()
>>    mm/damon: convert damon_ptep/pmdp_mkold() to use folios
>>    mm/damon: paddr: convert damon_pa_*() to use folios
>>    mm/damon: vaddr: convert damon_young_pmd_entry() to use folio
>>    mm/damon: remove unneed damon_get_page()
>>    mm/damon: vaddr: convert hugetlb related function to use folios
> 
> Thank you so much for your efforts on this work.  I found I forgot to mention
> some trivial typos in the first, the sixth, and the seventh patches, so I
> commented on them.  I should have commented on those earlier, sorry.
> 
Oh, please forgive my bad English spelling, and thanks to Andrew for
correcting the words when the patches are queued.

> Other than those,
> 
> Reviewed-by: SeongJae Park <sj@kernel.org>

Thank you very much.


> 
> 
> Thanks,
> SJ
> 
>>
>>   mm/damon/ops-common.c | 38 +++++++++++++++-------------
>>   mm/damon/ops-common.h |  2 +-
>>   mm/damon/paddr.c      | 58 +++++++++++++++++++------------------------
>>   mm/damon/vaddr.c      | 38 ++++++++++++++--------------
>>   mm/page_idle.c        | 47 ++++++++++++++++++-----------------
>>   5 files changed, 91 insertions(+), 92 deletions(-)
>>
>> -- 
>> 2.35.3
>>


* Re: [PATCH -next v3 4/7] mm/damon/paddr: convert damon_pa_*() to use folios
  2022-12-28 11:34 ` [PATCH -next v3 4/7] mm/damon/paddr: convert damon_pa_*() " Kefeng Wang
@ 2022-12-29 20:36   ` Matthew Wilcox
  2022-12-30  6:40     ` Kefeng Wang
  0 siblings, 1 reply; 19+ messages in thread
From: Matthew Wilcox @ 2022-12-29 20:36 UTC (permalink / raw)
  To: Kefeng Wang
  Cc: Andrew Morton, SeongJae Park, damon, linux-mm, vishal.moola, david

On Wed, Dec 28, 2022 at 07:34:10PM +0800, Kefeng Wang wrote:
> -		memcg = page_memcg_check(page);
> +		memcg = page_memcg_check(folio_page(folio, 0));

I doubly don't like this.  First, it should have been &folio->page.
Second, we should have a folio_memcg_check().  The only reason we don't
is that I hadn't needed one before now.  Try adding this patch on first.

--- 8< ---

From 5fca3ae2278b72d96d99fad5c433cd429a11989d Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Date: Thu, 29 Dec 2022 12:59:41 -0500
Subject: [PATCH] memcg: Add folio_memcg_check()

Convert page_memcg_check() into folio_memcg_check() and add a
page_memcg_check() wrapper.  The behaviour of page_memcg_check() is
unchanged; tail pages always had a NULL ->memcg_data.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/memcontrol.h | 40 +++++++++++++++++++++++++-------------
 mm/memcontrol.c            |  6 +++---
 2 files changed, 29 insertions(+), 17 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d3c8203cab6c..a2ebb4e2da63 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -466,34 +466,34 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
 }
 
 /*
- * page_memcg_check - get the memory cgroup associated with a page
- * @page: a pointer to the page struct
+ * folio_memcg_check - Get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
  *
- * Returns a pointer to the memory cgroup associated with the page,
- * or NULL. This function unlike page_memcg() can take any page
- * as an argument. It has to be used in cases when it's not known if a page
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function unlike folio_memcg() can take any folio
+ * as an argument. It has to be used in cases when it's not known if a folio
  * has an associated memory cgroup pointer or an object cgroups vector or
  * an object cgroup.
  *
- * For a non-kmem page any of the following ensures page and memcg binding
+ * For a non-kmem folio any of the following ensures folio and memcg binding
  * stability:
  *
- * - the page lock
+ * - the folio lock
  * - LRU isolation
- * - lock_page_memcg()
+ * - lock_folio_memcg()
  * - exclusive reference
  * - mem_cgroup_trylock_pages()
  *
- * For a kmem page a caller should hold an rcu read lock to protect memcg
- * associated with a kmem page from being released.
+ * For a kmem folio a caller should hold an rcu read lock to protect memcg
+ * associated with a kmem folio from being released.
  */
-static inline struct mem_cgroup *page_memcg_check(struct page *page)
+static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
 {
 	/*
-	 * Because page->memcg_data might be changed asynchronously
-	 * for slab pages, READ_ONCE() should be used here.
+	 * Because folio->memcg_data might be changed asynchronously
+	 * for slabs, READ_ONCE() should be used here.
 	 */
-	unsigned long memcg_data = READ_ONCE(page->memcg_data);
+	unsigned long memcg_data = READ_ONCE(folio->memcg_data);
 
 	if (memcg_data & MEMCG_DATA_OBJCGS)
 		return NULL;
@@ -508,6 +508,13 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page)
 	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
 }
 
+static inline struct mem_cgroup *page_memcg_check(struct page *page)
+{
+	if (PageTail(page))
+		return NULL;
+	return folio_memcg_check((struct folio *)page);
+}
+
 static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
 {
 	struct mem_cgroup *memcg;
@@ -1165,6 +1172,11 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
 	return NULL;
 }
 
+static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
+{
+	return NULL;
+}
+
 static inline struct mem_cgroup *page_memcg_check(struct page *page)
 {
 	return NULL;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 92f319ef6c99..259bc0a48d16 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2939,13 +2939,13 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
 	}
 
 	/*
-	 * page_memcg_check() is used here, because in theory we can encounter
+	 * folio_memcg_check() is used here, because in theory we can encounter
 	 * a folio where the slab flag has been cleared already, but
 	 * slab->memcg_data has not been freed yet
-	 * page_memcg_check(page) will guarantee that a proper memory
+	 * folio_memcg_check() will guarantee that a proper memory
 	 * cgroup pointer or NULL will be returned.
 	 */
-	return page_memcg_check(folio_page(folio, 0));
+	return folio_memcg_check(folio);
 }
 
 /*
-- 
2.35.1



* Re: [PATCH -next v3 5/7] mm/damon/vaddr: convert damon_young_pmd_entry() to use folio
  2022-12-28 11:34 ` [PATCH -next v3 5/7] mm/damon/vaddr: convert damon_young_pmd_entry() to use folio Kefeng Wang
@ 2022-12-29 21:06   ` Matthew Wilcox
  2022-12-29 21:31     ` SeongJae Park
  0 siblings, 1 reply; 19+ messages in thread
From: Matthew Wilcox @ 2022-12-29 21:06 UTC (permalink / raw)
  To: Kefeng Wang
  Cc: Andrew Morton, SeongJae Park, damon, linux-mm, vishal.moola, david

On Wed, Dec 28, 2022 at 07:34:11PM +0800, Kefeng Wang wrote:
> -		if (pmd_young(*pmd) || !page_is_idle(page) ||
> +		if (pmd_young(*pmd) || !folio_test_idle(folio) ||
>  					mmu_notifier_test_young(walk->mm,
>  						addr)) {
>  			*priv->page_sz = HPAGE_PMD_SIZE;

hmm ...

>  	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
>  	if (!pte_present(*pte))
>  		goto out;
> -	page = damon_get_page(pte_pfn(*pte));
> -	if (!page)
> +	folio = damon_get_folio(pte_pfn(*pte));
> +	if (!folio)
>  		goto out;
> -	if (pte_young(*pte) || !page_is_idle(page) ||
> +	if (pte_young(*pte) || !folio_test_idle(folio) ||
>  			mmu_notifier_test_young(walk->mm, addr)) {
>  		*priv->page_sz = PAGE_SIZE;

hmm ...

So why aren't we doing '*priv->page_sz = folio_size(folio)'?  What does
DAMON want to do when encountering folios that are neither PAGE_SIZE
nor HPAGE_PMD_SIZE?


* Re: [PATCH -next v3 5/7] mm/damon/vaddr: convert damon_young_pmd_entry() to use folio
  2022-12-29 21:06   ` Matthew Wilcox
@ 2022-12-29 21:31     ` SeongJae Park
  0 siblings, 0 replies; 19+ messages in thread
From: SeongJae Park @ 2022-12-29 21:31 UTC (permalink / raw)
  To: Matthew Wilcox
  Cc: Kefeng Wang, Andrew Morton, SeongJae Park, damon, linux-mm,
	vishal.moola, david

On Thu, 29 Dec 2022 21:06:12 +0000 Matthew Wilcox <willy@infradead.org> wrote:

> On Wed, Dec 28, 2022 at 07:34:11PM +0800, Kefeng Wang wrote:
> > -		if (pmd_young(*pmd) || !page_is_idle(page) ||
> > +		if (pmd_young(*pmd) || !folio_test_idle(folio) ||
> >  					mmu_notifier_test_young(walk->mm,
> >  						addr)) {
> >  			*priv->page_sz = HPAGE_PMD_SIZE;
> 
> hmm ...
> 
> >  	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
> >  	if (!pte_present(*pte))
> >  		goto out;
> > -	page = damon_get_page(pte_pfn(*pte));
> > -	if (!page)
> > +	folio = damon_get_folio(pte_pfn(*pte));
> > +	if (!folio)
> >  		goto out;
> > -	if (pte_young(*pte) || !page_is_idle(page) ||
> > +	if (pte_young(*pte) || !folio_test_idle(folio) ||
> >  			mmu_notifier_test_young(walk->mm, addr)) {
> >  		*priv->page_sz = PAGE_SIZE;
> 
> hmm ...
> 
> So why aren't we doing '*priv->page_sz = folio_size(folio)'?  What does
> DAMON want to do when encountering folios that are neither PAGE_SIZE
> nor HPAGE_PMD_SIZE?

Good point.  We use the field to know whether the next address to check for
access is in the same folio, and therefore whether we could reuse the last
access check result.  So it would be better to use 'folio_size(folio)' here.
The field name would also be better as 'folio_sz'.  I will make the change,
unless others do.
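
For illustration, a hedged sketch of what that change could look like in the
PTE-mapped case; 'folio_sz' here is the suggested rename, not the current
field name:

	folio = damon_get_folio(pte_pfn(*pte));
	if (!folio)
		goto out;
	if (pte_young(*pte) || !folio_test_idle(folio) ||
			mmu_notifier_test_young(walk->mm, addr)) {
		*priv->folio_sz = folio_size(folio);	/* was: PAGE_SIZE */
		priv->young = true;
	}
	folio_put(folio);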


Thanks,
SJ


* Re: [PATCH -next v3 4/7] mm/damon/paddr: convert damon_pa_*() to use folios
  2022-12-29 20:36   ` Matthew Wilcox
@ 2022-12-30  6:40     ` Kefeng Wang
  0 siblings, 0 replies; 19+ messages in thread
From: Kefeng Wang @ 2022-12-30  6:40 UTC (permalink / raw)
  To: Matthew Wilcox
  Cc: Andrew Morton, SeongJae Park, damon, linux-mm, vishal.moola, david



On 2022/12/30 4:36, Matthew Wilcox wrote:
> On Wed, Dec 28, 2022 at 07:34:10PM +0800, Kefeng Wang wrote:
>> -		memcg = page_memcg_check(page);
>> +		memcg = page_memcg_check(folio_page(folio, 0));
> 
> I doubly don't like this.  First, it should have been &folio->page.
> Second, we should have a folio_memcg_check().  The only reason we don't
> is that I hadn't needed one before now.  Try adding this patch on first.

OK, I will add this into the patch set and resend v4 (with the spelling
corrected as well), thanks.

> 
> --- 8< ---
> 
>>From 5fca3ae2278b72d96d99fad5c433cd429a11989d Mon Sep 17 00:00:00 2001
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> Date: Thu, 29 Dec 2022 12:59:41 -0500
> Subject: [PATCH] memcg: Add folio_memcg_check()
> 
> Convert page_memcg_check() into folio_memcg_check() and add a
> page_memcg_check() wrapper.  The behaviour of page_memcg_check() is
> unchanged; tail pages always had a NULL ->memcg_data.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   include/linux/memcontrol.h | 40 +++++++++++++++++++++++++-------------
>   mm/memcontrol.c            |  6 +++---
>   2 files changed, 29 insertions(+), 17 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index d3c8203cab6c..a2ebb4e2da63 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -466,34 +466,34 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
>   }
>   
>   /*
> - * page_memcg_check - get the memory cgroup associated with a page
> - * @page: a pointer to the page struct
> + * folio_memcg_check - Get the memory cgroup associated with a folio.
> + * @folio: Pointer to the folio.
>    *
> - * Returns a pointer to the memory cgroup associated with the page,
> - * or NULL. This function unlike page_memcg() can take any page
> - * as an argument. It has to be used in cases when it's not known if a page
> + * Returns a pointer to the memory cgroup associated with the folio,
> + * or NULL. This function unlike folio_memcg() can take any folio
> + * as an argument. It has to be used in cases when it's not known if a folio
>    * has an associated memory cgroup pointer or an object cgroups vector or
>    * an object cgroup.
>    *
> - * For a non-kmem page any of the following ensures page and memcg binding
> + * For a non-kmem folio any of the following ensures folio and memcg binding
>    * stability:
>    *
> - * - the page lock
> + * - the folio lock
>    * - LRU isolation
> - * - lock_page_memcg()
> + * - lock_folio_memcg()
>    * - exclusive reference
>    * - mem_cgroup_trylock_pages()
>    *
> - * For a kmem page a caller should hold an rcu read lock to protect memcg
> - * associated with a kmem page from being released.
> + * For a kmem folio a caller should hold an rcu read lock to protect memcg
> + * associated with a kmem folio from being released.
>    */
> -static inline struct mem_cgroup *page_memcg_check(struct page *page)
> +static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
>   {
>   	/*
> -	 * Because page->memcg_data might be changed asynchronously
> -	 * for slab pages, READ_ONCE() should be used here.
> +	 * Because folio->memcg_data might be changed asynchronously
> +	 * for slabs, READ_ONCE() should be used here.
>   	 */
> -	unsigned long memcg_data = READ_ONCE(page->memcg_data);
> +	unsigned long memcg_data = READ_ONCE(folio->memcg_data);
>   
>   	if (memcg_data & MEMCG_DATA_OBJCGS)
>   		return NULL;
> @@ -508,6 +508,13 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page)
>   	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
>   }
>   
> +static inline struct mem_cgroup *page_memcg_check(struct page *page)
> +{
> +	if (PageTail(page))
> +		return NULL;
> +	return folio_memcg_check((struct folio *)page);
> +}
> +
>   static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
>   {
>   	struct mem_cgroup *memcg;
> @@ -1165,6 +1172,11 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
>   	return NULL;
>   }
>   
> +static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
> +{
> +	return NULL;
> +}
> +
>   static inline struct mem_cgroup *page_memcg_check(struct page *page)
>   {
>   	return NULL;
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 92f319ef6c99..259bc0a48d16 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -2939,13 +2939,13 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
>   	}
>   
>   	/*
> -	 * page_memcg_check() is used here, because in theory we can encounter
> +	 * folio_memcg_check() is used here, because in theory we can encounter
>   	 * a folio where the slab flag has been cleared already, but
>   	 * slab->memcg_data has not been freed yet
> -	 * page_memcg_check(page) will guarantee that a proper memory
> +	 * folio_memcg_check() will guarantee that a proper memory
>   	 * cgroup pointer or NULL will be returned.
>   	 */
> -	return page_memcg_check(folio_page(folio, 0));
> +	return folio_memcg_check(folio);
>   }
>   
>   /*

