From: Muchun Song <songmuchun@bytedance.com>
To: corbet@lwn.net, mike.kravetz@oracle.com, tglx@linutronix.de,
mingo@redhat.com, bp@alien8.de, x86@kernel.org, hpa@zytor.com,
dave.hansen@linux.intel.com, luto@kernel.org,
peterz@infradead.org, viro@zeniv.linux.org.uk,
akpm@linux-foundation.org, paulmck@kernel.org,
mchehab+huawei@kernel.org, pawan.kumar.gupta@linux.intel.com,
rdunlap@infradead.org, oneukum@suse.com,
anshuman.khandual@arm.com, jroedel@suse.de,
almasrymina@google.com, rientjes@google.com, willy@infradead.org,
osalvador@suse.de, mhocko@suse.com, song.bao.hua@hisilicon.com,
david@redhat.com
Cc: duanxiongchun@bytedance.com, linux-doc@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
linux-fsdevel@vger.kernel.org,
Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v9 04/11] mm/hugetlb: Defer freeing of HugeTLB pages
Date: Sun, 13 Dec 2020 23:45:27 +0800
Message-ID: <20201213154534.54826-5-songmuchun@bytedance.com>
In-Reply-To: <20201213154534.54826-1-songmuchun@bytedance.com>
In a subsequent patch we will allocate the vmemmap pages when freeing
HugeTLB pages. But update_and_free_page() can be called from a non-task
context (and with hugetlb_lock held), so defer the actual freeing to a
workqueue to avoid having to allocate the vmemmap pages with GFP_ATOMIC.
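For reviewers unfamiliar with the llist API, the pattern used below
boils down to the sketch that follows. The identifiers are illustrative
only (not the names used in this patch), and it assumes <linux/llist.h>
and <linux/workqueue.h>. A producer running in atomic context pushes
nodes onto a lock-free list; llist_add() returns true only when the
list was previously empty, so the work item is scheduled exactly once
per batch, and the work function then drains the whole list with
llist_del_all() in process context, where it may sleep.

    /* Illustrative sketch only; see the real code in the patch below. */
    static LLIST_HEAD(pending);         /* lock-free singly linked list */

    static void drain_workfn(struct work_struct *work);
    static DECLARE_WORK(drain_work, drain_workfn);

    /* May be called from atomic context. */
    static void defer_free(struct llist_node *node)
    {
            /* llist_add() returns true only if the list was empty before. */
            if (llist_add(node, &pending))
                    schedule_work(&drain_work);
    }

    /* Runs in process context, so sleeping and GFP_KERNEL are fine here. */
    static void drain_workfn(struct work_struct *work)
    {
            struct llist_node *node = llist_del_all(&pending);

            while (node) {
                    struct llist_node *next = node->next;

                    /* ... free the object that embeds 'node' ... */
                    node = next;
                    cond_resched();
            }
    }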
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
mm/hugetlb.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++----
mm/hugetlb_vmemmap.c | 12 --------
mm/hugetlb_vmemmap.h | 17 ++++++++++++
3 files changed, 88 insertions(+), 18 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 140135fc8113..0ff9b90e524f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1292,15 +1292,76 @@ static inline void destroy_compound_gigantic_page(struct page *page,
unsigned int order) { }
#endif
-static void update_and_free_page(struct hstate *h, struct page *page)
+static void __free_hugepage(struct hstate *h, struct page *page);
+
+/*
+ * As update_and_free_page() can be called from a non-task context (and
+ * with hugetlb_lock held), defer the actual freeing to a workqueue so
+ * that we do not have to allocate a lot of vmemmap pages with GFP_ATOMIC.
+ *
+ * update_hpage_vmemmap_workfn() locklessly retrieves the linked list of
+ * pages to be freed and frees them one-by-one. As the page->mapping pointer
+ * is going to be cleared in update_hpage_vmemmap_workfn() anyway, it is
+ * reused as the llist_node structure of a lockless linked list of huge
+ * pages to be freed.
+ */
+static LLIST_HEAD(hpage_update_freelist);
+
+static void update_hpage_vmemmap_workfn(struct work_struct *work)
{
- int i;
+ struct llist_node *node;
+ struct page *page;
+
+ node = llist_del_all(&hpage_update_freelist);
+ while (node) {
+ page = container_of((struct address_space **)node,
+ struct page, mapping);
+ node = node->next;
+ page->mapping = NULL;
+ __free_hugepage(page_hstate(page), page);
+
+ cond_resched();
+ }
+}
+static DECLARE_WORK(hpage_update_work, update_hpage_vmemmap_workfn);
+
+static inline void __update_and_free_page(struct hstate *h, struct page *page)
+{
+ /* No need to allocate vmemmap pages */
+ if (!free_vmemmap_pages_per_hpage(h)) {
+ __free_hugepage(h, page);
+ return;
+ }
+
+ /*
+ * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap
+ * pages.
+ *
+ * Only call schedule_work() if hpage_update_freelist was previously
+ * empty. Otherwise, schedule_work() has already been called but the
+ * workfn hasn't retrieved the list yet.
+ */
+ if (llist_add((struct llist_node *)&page->mapping,
+ &hpage_update_freelist))
+ schedule_work(&hpage_update_work);
+}
+
+static void update_and_free_page(struct hstate *h, struct page *page)
+{
if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
return;
h->nr_huge_pages--;
h->nr_huge_pages_node[page_to_nid(page)]--;
+
+ __update_and_free_page(h, page);
+}
+
+static void __free_hugepage(struct hstate *h, struct page *page)
+{
+ int i;
+
for (i = 0; i < pages_per_huge_page(h); i++) {
page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1 << PG_referenced | 1 << PG_dirty |
@@ -1313,13 +1374,17 @@ static void update_and_free_page(struct hstate *h, struct page *page)
set_page_refcounted(page);
if (hstate_is_gigantic(h)) {
/*
- * Temporarily drop the hugetlb_lock, because
- * we might block in free_gigantic_page().
+ * Temporarily drop the hugetlb_lock only when this type of HugeTLB
+ * page does not support the vmemmap optimization (in that case this
+ * context still holds the hugetlb_lock), because we might block in
+ * free_gigantic_page().
*/
- spin_unlock(&hugetlb_lock);
+ if (!free_vmemmap_pages_per_hpage(h))
+ spin_unlock(&hugetlb_lock);
destroy_compound_gigantic_page(page, huge_page_order(h));
free_gigantic_page(page, huge_page_order(h));
- spin_lock(&hugetlb_lock);
+ if (!free_vmemmap_pages_per_hpage(h))
+ spin_lock(&hugetlb_lock);
} else {
__free_pages(page, huge_page_order(h));
}
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 5a714bd60d6b..6d4e77a2b6c7 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -180,18 +180,6 @@
#define RESERVE_VMEMMAP_NR 2U
#define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
-/*
- * How many vmemmap pages associated with a HugeTLB page that can be freed
- * to the buddy allocator.
- *
- * Todo: Returns zero for now, which means the feature is disabled. We will
- * enable it once all the infrastructure is there.
- */
-static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
-{
- return 0;
-}
-
static inline unsigned long free_vmemmap_pages_size_per_hpage(struct hstate *h)
{
return (unsigned long)free_vmemmap_pages_per_hpage(h) << PAGE_SHIFT;
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 6923f03534d5..01f8637adbe0 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -12,9 +12,26 @@
#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
void free_huge_page_vmemmap(struct hstate *h, struct page *head);
+
+/*
+ * How many vmemmap pages associated with a HugeTLB page can be freed
+ * to the buddy allocator.
+ *
+ * Todo: Returns zero for now, which means the feature is disabled. We will
+ * enable it once all the infrastructure is there.
+ */
+static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+{
+ return 0;
+}
#else
static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
{
}
+
+static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+{
+ return 0;
+}
#endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
#endif /* _LINUX_HUGETLB_VMEMMAP_H */
--
2.11.0