From: Zi Yan <zi.yan@sent.com>
To: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	Yang Shi <shy828301@gmail.com>, Yu Zhao <yuzhao@google.com>,
	linux-mm@kvack.org
Cc: Zi Yan <ziy@nvidia.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	linux-kernel@vger.kernel.org, cgroups@vger.kernel.org,
	linux-fsdevel@vger.kernel.org, linux-kselftest@vger.kernel.org
Subject: [PATCH 2/5] mm: page_owner: add support for splitting to any order in split page_owner.
Date: Mon, 20 Mar 2023 20:48:26 -0400	[thread overview]
Message-ID: <20230321004829.2012847-3-zi.yan@sent.com> (raw)
In-Reply-To: <20230321004829.2012847-1-zi.yan@sent.com>

From: Zi Yan <ziy@nvidia.com>

Add a new_order parameter to set the new page order in page_owner, and
use old_order instead of nr so that the parameters are consistent.
This prepares for upcoming changes that allow splitting a huge page to
any lower order.
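
For example (an illustrative sketch, not a call site in this patch), a
caller that previously split a high-order page entirely into base
pages changes from

	split_page_owner(page, 1 << order);	/* nr: number of base pages */

to

	split_page_owner(page, order, 0);	/* old_order, new_order */

and a future caller can request any smaller target order, e.g.
split_page_owner(page, 9, 2) to record an order-9 page as split into
order-2 pieces.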

Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 include/linux/page_owner.h | 12 +++++++-----
 mm/huge_memory.c           |  3 ++-
 mm/page_alloc.c            |  6 +++---
 mm/page_owner.c            | 11 ++++++-----
 4 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 119a0c9d2a8b..67d98de3d5a8 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -11,7 +11,8 @@ extern struct page_ext_operations page_owner_ops;
 extern void __reset_page_owner(struct page *page, unsigned short order);
 extern void __set_page_owner(struct page *page,
 			unsigned short order, gfp_t gfp_mask);
-extern void __split_page_owner(struct page *page, unsigned int nr);
+extern void __split_page_owner(struct page *page, unsigned short old_order,
+			unsigned short new_order);
 extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
 extern void __dump_page_owner(const struct page *page);
@@ -31,10 +32,11 @@ static inline void set_page_owner(struct page *page,
 		__set_page_owner(page, order, gfp_mask);
 }
 
-static inline void split_page_owner(struct page *page, unsigned int nr)
+static inline void split_page_owner(struct page *page, unsigned short old_order,
+			unsigned short new_order)
 {
 	if (static_branch_unlikely(&page_owner_inited))
-		__split_page_owner(page, nr);
+		__split_page_owner(page, old_order, new_order);
 }
 static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
 {
@@ -56,11 +58,11 @@ static inline void reset_page_owner(struct page *page, unsigned short order)
 {
 }
 static inline void set_page_owner(struct page *page,
-			unsigned int order, gfp_t gfp_mask)
+			unsigned short order, gfp_t gfp_mask)
 {
 }
 static inline void split_page_owner(struct page *page,
-			unsigned short order)
+			unsigned short old_order, unsigned short new_order)
 {
 }
 static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 30e3e300c42e..710189885402 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2511,6 +2511,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	struct lruvec *lruvec;
 	struct address_space *swap_cache = NULL;
 	unsigned long offset = 0;
+	unsigned int order = thp_order(head);
 	unsigned int nr = thp_nr_pages(head);
 	int i;
 
@@ -2556,7 +2557,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	unlock_page_lruvec(lruvec);
 	/* Caller disabled irqs, so they are still disabled here */
 
-	split_page_owner(head, nr);
+	split_page_owner(head, order, 0);
 
 	/* See comment in __split_huge_page_tail() */
 	if (PageAnon(head)) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 59c2b6696698..ec85562865fb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3530,7 +3530,7 @@ void split_page(struct page *page, unsigned int order)
 
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
-	split_page_owner(page, 1 << order);
+	split_page_owner(page, order, 0);
 	split_page_memcg(page, 1 << order, 1);
 }
 EXPORT_SYMBOL_GPL(split_page);
@@ -5746,8 +5746,8 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
 		struct page *page = virt_to_page((void *)addr);
 		struct page *last = page + nr;
 
-		split_page_owner(page, 1 << order);
-		split_page_memcg(page, 1 << order);
+		split_page_owner(page, order, 0);
+		split_page_memcg(page, 1 << order, 1);
 		while (page < --last)
 			set_page_refcounted(last);
 
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 31169b3e7f06..33d1b6efe6a6 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -211,19 +211,20 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
 	page_ext_put(page_ext);
 }
 
-void __split_page_owner(struct page *page, unsigned int nr)
+void __split_page_owner(struct page *page, unsigned short old_order,
+			unsigned short new_order)
 {
-	int i;
+	int i, old_nr = 1 << old_order, new_nr = 1 << new_order;
 	struct page_ext *page_ext = page_ext_get(page);
 	struct page_owner *page_owner;
 
 	if (unlikely(!page_ext))
 		return;
 
-	for (i = 0; i < nr; i++) {
+	for (i = 0; i < old_nr; i += new_nr) {
+		page_ext = lookup_page_ext(page + i);
 		page_owner = get_page_owner(page_ext);
-		page_owner->order = 0;
-		page_ext = page_ext_next(page_ext);
+		page_owner->order = new_order;
 	}
 	page_ext_put(page_ext);
 }
-- 
2.39.2
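
To make the new loop semantics concrete, here is a small userspace
model of the hunk above (a sketch only; model_split() and its flat
orders[] array are hypothetical stand-ins for the kernel's page_ext
machinery, which this does not replicate):

	#include <stdio.h>

	/*
	 * Model of __split_page_owner(): record new_order on the first
	 * page of each new_order-sized chunk of the old block.
	 */
	static void model_split(unsigned short *orders, unsigned short old_order,
				unsigned short new_order)
	{
		int old_nr = 1 << old_order, new_nr = 1 << new_order;

		for (int i = 0; i < old_nr; i += new_nr)
			orders[i] = new_order;
	}

	int main(void)
	{
		unsigned short orders[16] = { 4 };	/* one order-4 block */

		model_split(orders, 4, 2);	/* split into order-2 chunks */
		for (int i = 0; i < 16; i++)
			printf("page %2d: order %d\n", i, orders[i]);
		/* pages 0, 4, 8 and 12 now record order 2 */
		return 0;
	}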


Thread overview: 11 messages

2023-03-21  0:48 [PATCH 0/5] Split a folio to any lower order folios Zi Yan
2023-03-21  0:48 ` [PATCH 1/5] mm: memcg: make memcg huge page split support any order split Zi Yan
2023-03-21  0:48 ` [PATCH 2/5] mm: page_owner: add support for splitting to any order in split page_owner Zi Yan [this message]
2023-03-24 15:17   ` Michal Koutný
2023-03-24 15:22     ` Zi Yan
2023-03-21  0:48 ` [PATCH 3/5] mm: thp: split huge page to any lower order pages Zi Yan
2023-03-22  7:55   ` Ryan Roberts
2023-03-22 14:27     ` Zi Yan
2023-03-22 14:48       ` Ryan Roberts
2023-03-21  0:48 ` [PATCH 4/5] mm: truncate: split huge page cache page to a non-zero order if possible Zi Yan
2023-03-21  0:48 ` [PATCH 5/5] mm: huge_memory: enable debugfs to split huge pages to any order Zi Yan
