From: Muchun Song <songmuchun@bytedance.com>
To: corbet@lwn.net, mike.kravetz@oracle.com, tglx@linutronix.de,
	mingo@redhat.com, bp@alien8.de, x86@kernel.org, hpa@zytor.com,
	dave.hansen@linux.intel.com, luto@kernel.org,
	peterz@infradead.org, viro@zeniv.linux.org.uk,
	akpm@linux-foundation.org, paulmck@kernel.org,
	mchehab+huawei@kernel.org, pawan.kumar.gupta@linux.intel.com,
	rdunlap@infradead.org, oneukum@suse.com,
	anshuman.khandual@arm.com, jroedel@suse.de,
	almasrymina@google.com, rientjes@google.com, willy@infradead.org,
	osalvador@suse.de, mhocko@suse.com, song.bao.hua@hisilicon.com,
	david@redhat.com
Cc: duanxiongchun@bytedance.com, linux-doc@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	linux-fsdevel@vger.kernel.org,
	Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v8 10/12] mm/hugetlb: Introduce nr_free_vmemmap_pages in the struct hstate
Date: Thu, 10 Dec 2020 11:55:24 +0800
Message-ID: <20201210035526.38938-11-songmuchun@bytedance.com>
In-Reply-To: <20201210035526.38938-1-songmuchun@bytedance.com>

All the infrastructure is now in place, so introduce a
nr_free_vmemmap_pages field in struct hstate to indicate how many
vmemmap pages associated with a HugeTLB page can be freed to the buddy
allocator, and initialize it in hugetlb_vmemmap_init(). This patch is
the actual enablement of the feature.
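
For example, assuming a 4 KB base page size and a 64-byte struct page
(the common x86_64 configuration), a 2 MB HugeTLB page is described by
512 struct pages, i.e. 8 vmemmap pages. Since the head page and the
first tail page are kept, 6 vmemmap pages (24 KB) can be freed per
2 MB HugeTLB page, and 4094 vmemmap pages (about 16 MB) per 1 GB
HugeTLB page.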

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 include/linux/hugetlb.h |  3 +++
 mm/hugetlb.c            |  1 +
 mm/hugetlb_vmemmap.c    | 29 +++++++++++++++++++++++++++++
 mm/hugetlb_vmemmap.h    | 10 ++++++----
 4 files changed, 39 insertions(+), 4 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 7f47f0eeca3b..66d82ae7b712 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -492,6 +492,9 @@ struct hstate {
 	unsigned int nr_huge_pages_node[MAX_NUMNODES];
 	unsigned int free_huge_pages_node[MAX_NUMNODES];
 	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+	unsigned int nr_free_vmemmap_pages;
+#endif
 #ifdef CONFIG_CGROUP_HUGETLB
 	/* cgroup control files */
 	struct cftype cgroup_files_dfl[7];
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2e7a59b44364..6440367a71b6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3327,6 +3327,7 @@ void __init hugetlb_add_hstate(unsigned int order)
 	h->next_nid_to_free = first_memory_node;
 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
 					huge_page_size(h)/1024);
+	hugetlb_vmemmap_init(h);
 
 	parsed_hstate = h;
 }
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index f0926b382338..36a2e2db7913 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -421,3 +421,32 @@ void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 
 	free_vmemmap_page_list(&vmemmap_pages);
 }
+
+void __init hugetlb_vmemmap_init(struct hstate *h)
+{
+	unsigned int nr_pages = pages_per_huge_page(h);
+	unsigned int vmemmap_pages;
+
+	/* We cannot optimize if a "struct page" crosses page boundaries. */
+	if (!is_power_of_2(sizeof(struct page)))
+		return;
+
+	if (!hugetlb_free_vmemmap_enabled)
+		return;
+
+	vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
+	/*
+	 * The head page and the first tail page are not freed to the buddy
+	 * system; all the other tail pages are remapped to the first tail
+	 * page, so only the remaining vmemmap pages can be freed.
+	 *
+	 * Could RESERVE_VMEMMAP_NR be greater than @vmemmap_pages? It is true
+	 * on some architectures (e.g. aarch64). See Documentation/arm64/
+	 * hugetlbpage.rst for more details.
+	 */
+	if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
+		h->nr_free_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;
+
+	pr_info("can free %d vmemmap pages for %s\n", h->nr_free_vmemmap_pages,
+		h->name);
+}
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 8fd57c49e230..0a1c0d33a316 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -11,21 +11,23 @@
 #include <linux/hugetlb.h>
 
 #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+void hugetlb_vmemmap_init(struct hstate *h);
 void alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
 void free_huge_page_vmemmap(struct hstate *h, struct page *head);
 
 /*
  * How many vmemmap pages associated with a HugeTLB page that can be freed
  * to the buddy allocator.
- *
- * Todo: Now it is zero, because all infrastructure is not ready. Once all the
- * infrastructure is ready, we will rework this function to support the feature.
  */
 static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
 {
-	return 0;
+	return h->nr_free_vmemmap_pages;
 }
 #else
+static inline void hugetlb_vmemmap_init(struct hstate *h)
+{
+}
+
 static inline void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 }
-- 
2.11.0

