From: Sidhartha Kumar <sidhartha.kumar@oracle.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: akpm@linux-foundation.org, songmuchun@bytedance.com,
	mike.kravetz@oracle.com, willy@infradead.org,
	tsahu@linux.ibm.com, jhubbard@nvidia.com,
	Sidhartha Kumar <sidhartha.kumar@oracle.com>
Subject: [PATCH mm-unstable 3/8] mm/hugetlb: convert dequeue_hugetlb_page_node functions to folios
Date: Tue,  3 Jan 2023 13:13:35 -0600
Message-ID: <20230103191340.116536-4-sidhartha.kumar@oracle.com>
In-Reply-To: <20230103191340.116536-1-sidhartha.kumar@oracle.com>

dequeue_huge_page_node_exact() is renamed to
dequeue_hugetlb_folio_node_exact() and dequeue_huge_page_nodemask() is
renamed to dequeue_hugetlb_folio_nodemask(). Both helpers now operate
on and return folios; their callers are updated to handle the returned
folio, converting back with &folio->page where a struct page must still
be returned.

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
---
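Notes:

The conversion leans on the usual page/folio duality: page_folio()
maps any page (head or tail) to its folio, and &folio->page converts
back where a caller still expects a struct page, as in
dequeue_huge_page_vma() and alloc_huge_page_nodemask() below. A rough
standalone sketch of that pattern follows, using simplified stand-in
types rather than the kernel's real definitions (the real struct page
tags the head pointer into the low bit of compound_head, and
page_folio() is built on container_of()):

#include <stdio.h>

/*
 * Simplified stand-ins for the kernel's types, for illustration only.
 */
struct page {
	unsigned long flags;
	struct page *compound_head;	/* NULL for a head page */
};

struct folio {
	struct page page;		/* a folio wraps its head page */
};

static struct folio *page_folio(struct page *page)
{
	struct page *head = page->compound_head ?
				page->compound_head : page;

	/* page is the first member, so this cast mimics container_of() */
	return (struct folio *)head;
}

int main(void)
{
	struct folio f = { .page = { 0, NULL } };
	struct page tail = { 0, &f.page };

	/* head and tail pages resolve to the same folio... */
	printf("folio from head page: %p\n", (void *)page_folio(&f.page));
	printf("folio from tail page: %p\n", (void *)page_folio(&tail));
	/* ...and &folio->page recovers the head page for legacy callers */
	printf("head page from folio: %p\n",
	       (void *)&page_folio(&tail)->page);
	return 0;
}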
 mm/hugetlb.c | 55 +++++++++++++++++++++++++++++-----------------------
 1 file changed, 31 insertions(+), 24 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b06ec8d60794..8dffb77d3510 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1140,32 +1140,36 @@ static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
 	folio_set_hugetlb_freed(folio);
 }
 
-static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
+static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
+								int nid)
 {
 	struct page *page;
+	struct folio *folio;
 	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
 
 	lockdep_assert_held(&hugetlb_lock);
 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
-		if (pin && !is_longterm_pinnable_page(page))
+		folio = page_folio(page);
+
+		if (pin && !folio_is_longterm_pinnable(folio))
 			continue;
 
-		if (PageHWPoison(page))
+		if (folio_test_hwpoison(folio))
 			continue;
 
-		list_move(&page->lru, &h->hugepage_activelist);
-		set_page_refcounted(page);
-		ClearHPageFreed(page);
+		list_move(&folio->lru, &h->hugepage_activelist);
+		folio_ref_unfreeze(folio, 1);
+		folio_clear_hugetlb_freed(folio);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
-		return page;
+		return folio;
 	}
 
 	return NULL;
 }
 
-static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
-		nodemask_t *nmask)
+static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
+							int nid, nodemask_t *nmask)
 {
 	unsigned int cpuset_mems_cookie;
 	struct zonelist *zonelist;
@@ -1178,7 +1182,7 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask,
 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
-		struct page *page;
+		struct folio *folio;
 
 		if (!cpuset_zone_allowed(zone, gfp_mask))
 			continue;
@@ -1190,9 +1194,9 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask,
 			continue;
 		node = zone_to_nid(zone);
 
-		page = dequeue_huge_page_node_exact(h, node);
-		if (page)
-			return page;
+		folio = dequeue_hugetlb_folio_node_exact(h, node);
+		if (folio)
+			return folio;
 	}
 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
 		goto retry_cpuset;
@@ -1210,7 +1214,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 				unsigned long address, int avoid_reserve,
 				long chg)
 {
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	struct mempolicy *mpol;
 	gfp_t gfp_mask;
 	nodemask_t *nodemask;
@@ -1232,22 +1236,24 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
 
 	if (mpol_is_preferred_many(mpol)) {
-		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+							nid, nodemask);
 
 		/* Fallback to all nodes if page==NULL */
 		nodemask = NULL;
 	}
 
-	if (!page)
-		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+	if (!folio)
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+							nid, nodemask);
 
-	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
-		SetHPageRestoreReserve(page);
+	if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
+		folio_set_hugetlb_restore_reserve(folio);
 		h->resv_huge_pages--;
 	}
 
 	mpol_cond_put(mpol);
-	return page;
+	return &folio->page;
 
 err:
 	return NULL;
@@ -2331,12 +2337,13 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 {
 	spin_lock_irq(&hugetlb_lock);
 	if (available_huge_pages(h)) {
-		struct page *page;
+		struct folio *folio;
 
-		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
-		if (page) {
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+						preferred_nid, nmask);
+		if (folio) {
 			spin_unlock_irq(&hugetlb_lock);
-			return page;
+			return &folio->page;
 		}
 	}
 	spin_unlock_irq(&hugetlb_lock);
-- 
2.39.0
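
The zonelist walk in dequeue_hugetlb_folio_nodemask() keeps the
existing cpuset retry idiom: sample a cookie with
read_mems_allowed_begin(), scan the allowed zones, and rescan via
read_mems_allowed_retry() if the task's cpuset changed mid-walk. A
simplified userspace sketch of that seqcount-style pattern (the atomic
counter and helper names here are illustrative, not the kernel's
implementation):

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative seqcount: even = stable, odd = writer mid-update. */
static atomic_uint mems_seq;

static unsigned int read_seq_begin(void)
{
	unsigned int seq;

	while ((seq = atomic_load(&mems_seq)) & 1)
		;	/* wait for an in-flight update to finish */
	return seq;
}

static int read_seq_retry(unsigned int seq)
{
	/* retry if the sequence moved while we were reading */
	return atomic_load(&mems_seq) != seq;
}

int main(void)
{
	unsigned int cookie;
	int node;

retry:
	cookie = read_seq_begin();
	node = 0;			/* stand-in for the zonelist walk */
	if (read_seq_retry(cookie))
		goto retry;		/* cpuset changed under us: rescan */

	printf("dequeued from node %d\n", node);
	return 0;
}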



Thread overview: 27+ messages
2023-01-03 19:13 [PATCH mm-unstable 0/8] continue hugetlb folio conversions Sidhartha Kumar
2023-01-03 19:13 ` [PATCH mm-unstable 1/8] mm/hugetlb: convert isolate_hugetlb to folios Sidhartha Kumar
2023-01-03 20:56   ` Matthew Wilcox
2023-01-06 23:04   ` Mike Kravetz
2023-01-03 19:13 ` [PATCH mm-unstable 2/8] mm/hugetlb: convert __update_and_free_page() " Sidhartha Kumar
2023-01-06 23:46   ` Mike Kravetz
2023-01-03 19:13 ` Sidhartha Kumar [this message]
2023-01-03 21:00   ` [PATCH mm-unstable 3/8] mm/hugetlb: convert dequeue_hugetlb_page_node functions " Matthew Wilcox
2023-01-06 23:57     ` Mike Kravetz
2023-01-03 19:13 ` [PATCH mm-unstable 4/8] mm/hugetlb: convert alloc_surplus_huge_page() " Sidhartha Kumar
2023-01-07  0:15   ` Mike Kravetz
2023-01-03 19:13 ` [PATCH mm-unstable 5/8] mm/hugetlb: increase use of folios in alloc_huge_page() Sidhartha Kumar
2023-01-07  0:30   ` Mike Kravetz
2023-01-03 19:13 ` [PATCH mm-unstable 6/8] mm/hugetlb: convert alloc_migrate_huge_page to folios Sidhartha Kumar
2023-01-07  0:54   ` Mike Kravetz
2023-01-09 16:26     ` Sidhartha Kumar
2023-01-09 18:21       ` Mike Kravetz
2023-01-03 19:13 ` [PATCH mm-unstable 7/8] mm/hugetlb: convert restore_reserve_on_error() " Sidhartha Kumar
2023-01-07  0:57   ` Mike Kravetz
2023-01-03 19:13 ` [PATCH mm-unstable 8/8] mm/hugetlb: convert demote_free_huge_page " Sidhartha Kumar
2023-01-07  1:11   ` Mike Kravetz
2023-01-07  1:31     ` Matthew Wilcox
2023-01-07 20:55       ` Mike Kravetz
2023-01-09 16:36         ` Sidhartha Kumar
2023-01-09 18:23           ` Mike Kravetz
2023-01-09 20:01             ` John Hubbard
2023-01-09 20:53               ` Sidhartha Kumar
