From: Zi Yan <zi.yan@sent.com>
To: Matthew Wilcox <willy@infradead.org>, linux-mm@kvack.org
Cc: Roman Gushchin <roman.gushchin@linux.dev>,
	Shuah Khan <shuah@kernel.org>, Yang Shi <shy828301@gmail.com>,
	Miaohe Lin <linmiaohe@huawei.com>,
	Hugh Dickins <hughd@google.com>,
	"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
	linux-kernel@vger.kernel.org, cgroups@vger.kernel.org,
	linux-kselftest@vger.kernel.org, Zi Yan <ziy@nvidia.com>
Subject: [RFC PATCH 2/5] mm: page_owner: add support for splitting to any order in split page_owner.
Date: Mon, 21 Mar 2022 10:21:25 -0400
Message-ID: <20220321142128.2471199-3-zi.yan@sent.com>
In-Reply-To: <20220321142128.2471199-1-zi.yan@sent.com>

From: Zi Yan <ziy@nvidia.com>

Add a new_order parameter to set the new page order in page_owner, and use
old_order instead of nr so that the two parameters are consistent. This
prepares for upcoming changes that split a huge page to any lower order.

Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 include/linux/page_owner.h | 12 +++++++-----
 mm/huge_memory.c           |  3 ++-
 mm/page_alloc.c            |  2 +-
 mm/page_owner.c            | 18 +++++++++---------
 4 files changed, 19 insertions(+), 16 deletions(-)
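
A minimal user-space sketch of the semantics this gives split_page_owner(),
for illustration only (the helper split_owner_model() and the order_of array
are hypothetical, not kernel API): splitting an old_order block into
new_order pieces leaves one owner record per new head page, each recording
new_order.

	/* order_of[i] models the page_owner order recorded for page i;
	 * only offsets that become heads of new pieces are updated. */
	void split_owner_model(unsigned short *order_of,
			       unsigned short old_order,
			       unsigned short new_order)
	{
		int i, old_nr = 1 << old_order, new_nr = 1 << new_order;

		for (i = 0; i < old_nr; i += new_nr)
			order_of[i] = new_order;
	}

With new_order == 0 both existing callers keep today's behavior (every base
page records order 0), while a future order-9-to-order-2 split would update
only every 4th record:

	split_owner_model(order_of, 9, 0);	/* 512 records set to order 0 */
	split_owner_model(order_of, 9, 2);	/* 128 records set to order 2 */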

diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 119a0c9d2a8b..16050cc89274 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -11,7 +11,8 @@ extern struct page_ext_operations page_owner_ops;
 extern void __reset_page_owner(struct page *page, unsigned short order);
 extern void __set_page_owner(struct page *page,
 			unsigned short order, gfp_t gfp_mask);
-extern void __split_page_owner(struct page *page, unsigned int nr);
+extern void __split_page_owner(struct page *page, unsigned short old_order,
+			unsigned short new_order);
 extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
 extern void __dump_page_owner(const struct page *page);
@@ -31,10 +32,11 @@ static inline void set_page_owner(struct page *page,
 		__set_page_owner(page, order, gfp_mask);
 }
 
-static inline void split_page_owner(struct page *page, unsigned int nr)
+static inline void split_page_owner(struct page *page, unsigned short old_order,
+			unsigned short new_order)
 {
 	if (static_branch_unlikely(&page_owner_inited))
-		__split_page_owner(page, nr);
+		__split_page_owner(page, old_order, new_order);
 }
 static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
 {
@@ -56,11 +58,11 @@ static inline void reset_page_owner(struct page *page, unsigned short order)
 {
 }
 static inline void set_page_owner(struct page *page,
-			unsigned int order, gfp_t gfp_mask)
+			unsigned short order, gfp_t gfp_mask)
 {
 }
 static inline void split_page_owner(struct page *page,
-			unsigned short order)
+			unsigned short old_order, unsigned short new_order)
 {
 }
 static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 640040c386f0..fcfa46af6c4c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2367,6 +2367,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	struct lruvec *lruvec;
 	struct address_space *swap_cache = NULL;
 	unsigned long offset = 0;
+	unsigned int order = thp_order(head);
 	unsigned int nr = thp_nr_pages(head);
 	int i;
 
@@ -2408,7 +2409,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	unlock_page_lruvec(lruvec);
 	/* Caller disabled irqs, so they are still disabled here */
 
-	split_page_owner(head, nr);
+	split_page_owner(head, order, 0);
 
 	/* See comment in __split_huge_page_tail() */
 	if (PageAnon(head)) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d982919b9e51..9cac40c26c58 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3514,7 +3514,7 @@ void split_page(struct page *page, unsigned int order)
 
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
-	split_page_owner(page, 1 << order);
+	split_page_owner(page, order, 0);
 	split_page_memcg(page, 1 << order, 0);
 }
 EXPORT_SYMBOL_GPL(split_page);
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 0a9588506571..52013c846d19 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -202,19 +202,19 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
 	page_owner->last_migrate_reason = reason;
 }
 
-void __split_page_owner(struct page *page, unsigned int nr)
+void __split_page_owner(struct page *page, unsigned short old_order,
+			unsigned short new_order)
 {
-	int i;
-	struct page_ext *page_ext = lookup_page_ext(page);
+	int i, old_nr = 1 << old_order, new_nr = 1 << new_order;
+	struct page_ext *page_ext;
 	struct page_owner *page_owner;
 
-	if (unlikely(!page_ext))
-		return;
-
-	for (i = 0; i < nr; i++) {
+	for (i = 0; i < old_nr; i += new_nr) {
+		page_ext = lookup_page_ext(page + i);
+		if (unlikely(!page_ext))
+			return;
 		page_owner = get_page_owner(page_ext);
-		page_owner->order = 0;
-		page_ext = page_ext_next(page_ext);
+		page_owner->order = new_order;
 	}
 }
 
-- 
2.35.1


Thread overview: 46+ messages
2022-03-21 14:21 [RFC PATCH 0/5] Split a huge page to any lower order pages Zi Yan
2022-03-21 14:21 ` [RFC PATCH 1/5] mm: memcg: make memcg huge page split support any order split Zi Yan
2022-03-21 18:57   ` Roman Gushchin
2022-03-21 19:07     ` Zi Yan
2022-03-21 19:54       ` Matthew Wilcox
2022-03-21 20:26         ` Zi Yan
2022-03-21 14:21 ` [RFC PATCH 2/5] mm: page_owner: add support for splitting to any order in split page_owner Zi Yan [this message]
2022-03-21 19:02   ` Roman Gushchin
2022-03-21 19:08     ` Zi Yan
2022-03-21 14:21 ` [RFC PATCH 3/5] mm: thp: split huge page to any lower order pages Zi Yan
2022-03-21 22:18   ` Roman Gushchin
2022-03-22 14:21     ` Zi Yan
2022-03-22  3:21   ` Miaohe Lin
2022-03-22 14:30     ` Zi Yan
2022-03-23  2:31       ` Miaohe Lin
2022-03-23 22:10         ` Zi Yan
2022-03-24  2:02           ` Miaohe Lin
2022-03-22 20:57   ` Yang Shi
2022-03-21 14:21 ` [RFC PATCH 4/5] mm: truncate: split huge page cache page to a non-zero order if possible Zi Yan
2022-03-21 22:32   ` Roman Gushchin
2022-03-22 14:19     ` Zi Yan
2022-03-23  6:40   ` [mm] 2757cee2d6: UBSAN:shift-out-of-bounds_in_include/linux/log2.h kernel test robot
2022-03-21 14:21 ` [RFC PATCH 5/5] mm: huge_memory: enable debugfs to split huge pages to any order Zi Yan
2022-03-21 22:23   ` Roman Gushchin
