From: Mike Kravetz <mike.kravetz@oracle.com>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: David Hildenbrand <david@redhat.com>,
Michal Hocko <mhocko@suse.com>,
Oscar Salvador <osalvador@suse.de>, Zi Yan <ziy@nvidia.com>,
Muchun Song <songmuchun@bytedance.com>,
Naoya Horiguchi <naoya.horiguchi@linux.dev>,
David Rientjes <rientjes@google.com>,
"Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com>,
Andrew Morton <akpm@linux-foundation.org>,
Mike Kravetz <mike.kravetz@oracle.com>
Subject: [PATCH v3 5/5] hugetlb: add hugetlb demote page support
Date: Fri, 1 Oct 2021 10:52:10 -0700
Message-ID: <20211001175210.45968-6-mike.kravetz@oracle.com>
In-Reply-To: <20211001175210.45968-1-mike.kravetz@oracle.com>
Demote page functionality will split a huge page into a number of huge
pages of a smaller size. For example, on x86 a 1GB huge page can be
demoted into 512 2M huge pages. Demotion is done 'in place' by simply
splitting the huge page.
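As an aside, a minimal user space sketch of the size arithmetic (the
order values below are the usual x86-64 ones with 4K base pages and are
assumed here only for illustration, not taken from this patch):

#include <stdio.h>

/* Assumed x86-64 values: with 4K base pages, a 2MB huge page has
 * order 9 and a 1GB huge page has order 18. */
#define SRC_ORDER	18	/* 1GB huge page */
#define DST_ORDER	9	/* 2MB huge page */

int main(void)
{
	/* Demotion walks the source page in DST_ORDER-sized strides;
	 * each stride becomes one smaller huge page, so the split is
	 * purely bookkeeping and no data is copied. */
	printf("1GB page -> %lu 2MB pages\n",
	       (1UL << SRC_ORDER) / (1UL << DST_ORDER));
	return 0;
}

which prints 512, matching the example above.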
Add '*_for_demote' wrappers for remove_hugetlb_page,
destroy_compound_gigantic_page and prep_compound_gigantic_page for use
by the demote code.
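For reviewers, the new demote path can be summarized as follows; this
is only an annotated outline of demote_free_huge_page() from the diff
below, not separate code:

/*
 * demote_free_huge_page(), in outline:
 *
 *	remove_hugetlb_page_for_demote(h, page, false);
 *	spin_unlock_irq(&hugetlb_lock);		// vmemmap alloc may sleep
 *	alloc_huge_page_vmemmap(h, page);	// on failure, re-add page and bail
 *	destroy_compound_gigantic_page_for_demote(page, huge_page_order(h));
 *	for (each demote_order sized chunk of the original page) {
 *		prep_compound_page() or prep_compound_gigantic_page_for_demote();
 *		prep_new_huge_page(target_hstate, ...);
 *		put_page();			// frees into the target pool
 *	}
 *	spin_lock_irq(&hugetlb_lock);
 */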
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
mm/hugetlb.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 74 insertions(+), 8 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ccbe323c992b..aec2e737d0cd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1270,7 +1270,7 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
((node = hstate_next_node_to_free(hs, mask)) || 1); \
nr_nodes--)
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+/* used to demote non-gigantic huge pages as well */
static void __destroy_compound_gigantic_page(struct page *page,
unsigned int order, bool demote)
{
@@ -1293,6 +1293,13 @@ static void __destroy_compound_gigantic_page(struct page *page,
__ClearPageHead(page);
}
+static void destroy_compound_gigantic_page_for_demote(struct page *page,
+ unsigned int order)
+{
+ __destroy_compound_gigantic_page(page, order, true);
+}
+
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
unsigned int order)
{
@@ -1438,6 +1445,12 @@ static void remove_hugetlb_page(struct hstate *h, struct page *page,
__remove_hugetlb_page(h, page, adjust_surplus, false);
}
+static void remove_hugetlb_page_for_demote(struct hstate *h, struct page *page,
+ bool adjust_surplus)
+{
+ __remove_hugetlb_page(h, page, adjust_surplus, true);
+}
+
static void add_hugetlb_page(struct hstate *h, struct page *page,
bool adjust_surplus)
{
@@ -1779,6 +1792,12 @@ static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
return __prep_compound_gigantic_page(page, order, false);
}
+static bool prep_compound_gigantic_page_for_demote(struct page *page,
+ unsigned int order)
+{
+ return __prep_compound_gigantic_page(page, order, true);
+}
+
/*
* PageHuge() only returns true for hugetlbfs pages, but not for normal or
* transparent huge pages. See the PageTransHuge() documentation for more
@@ -3302,9 +3321,54 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
return 0;
}
+static int demote_free_huge_page(struct hstate *h, struct page *page)
+{
+ int i, nid = page_to_nid(page);
+ struct hstate *target_hstate;
+ int rc = 0;
+
+ target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
+
+ remove_hugetlb_page_for_demote(h, page, false);
+ spin_unlock_irq(&hugetlb_lock);
+
+ rc = alloc_huge_page_vmemmap(h, page);
+ if (rc) {
+ /* Allocation of vmemmap failed, we cannot demote the page */
+ spin_lock_irq(&hugetlb_lock);
+ set_page_refcounted(page);
+ add_hugetlb_page(h, page, false);
+ return rc;
+ }
+
+ /*
+ * Use destroy_compound_gigantic_page_for_demote for all huge page
+ * sizes as it will not ref count pages.
+ */
+ destroy_compound_gigantic_page_for_demote(page, huge_page_order(h));
+
+ for (i = 0; i < pages_per_huge_page(h);
+ i += pages_per_huge_page(target_hstate)) {
+ if (hstate_is_gigantic(target_hstate))
+ prep_compound_gigantic_page_for_demote(page + i,
+ target_hstate->order);
+ else
+ prep_compound_page(page + i, target_hstate->order);
+ set_page_private(page + i, 0);
+ set_page_refcounted(page + i);
+ prep_new_huge_page(target_hstate, page + i, nid);
+ put_page(page + i);
+ }
+
+ spin_lock_irq(&hugetlb_lock);
+ return rc;
+}
+
static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
__must_hold(&hugetlb_lock)
{
+ int nr_nodes, node;
+ struct page *page;
int rc = 0;
lockdep_assert_held(&hugetlb_lock);
@@ -3313,9 +3377,15 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
if (!h->demote_order)
return rc;
- /*
- * TODO - demote functionality will be added in subsequent patch
- */
+ for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
+ if (!list_empty(&h->hugepage_freelists[node])) {
+ page = list_entry(h->hugepage_freelists[node].next,
+ struct page, lru);
+ rc = !demote_free_huge_page(h, page);
+ break;
+ }
+ }
+
return rc;
}
@@ -3550,10 +3620,6 @@ static ssize_t demote_store(struct kobject *kobj,
/*
* Check for available pages to demote each time through the
* loop as demote_pool_huge_page will drop hugetlb_lock.
- *
- * NOTE: demote_pool_huge_page does not yet drop hugetlb_lock
- * but will when full demote functionality is added in a later
- * patch.
*/
if (nid != NUMA_NO_NODE)
nr_available = h->free_huge_pages_node[nid];
--
2.31.1