From: Muchun Song <songmuchun@bytedance.com>
To: corbet@lwn.net, mike.kravetz@oracle.com, tglx@linutronix.de,
	mingo@redhat.com, bp@alien8.de, x86@kernel.org, hpa@zytor.com,
	dave.hansen@linux.intel.com, luto@kernel.org,
	peterz@infradead.org, viro@zeniv.linux.org.uk,
	akpm@linux-foundation.org, paulmck@kernel.org,
	mchehab+huawei@kernel.org, pawan.kumar.gupta@linux.intel.com,
	rdunlap@infradead.org, oneukum@suse.com,
	anshuman.khandual@arm.com, jroedel@suse.de,
	almasrymina@google.com, rientjes@google.com, willy@infradead.org,
	osalvador@suse.de, mhocko@suse.com
Cc: duanxiongchun@bytedance.com, linux-doc@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	linux-fsdevel@vger.kernel.org,
	Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v4 04/21] mm/hugetlb: Introduce nr_free_vmemmap_pages in the struct hstate
Date: Fri, 13 Nov 2020 18:59:35 +0800
Message-ID: <20201113105952.11638-5-songmuchun@bytedance.com>
In-Reply-To: <20201113105952.11638-1-songmuchun@bytedance.com>

If the size of a HugeTLB page is 2MB, 512 struct page structures
(8 pages) are associated with it. As far as I know, we only use the
first 4 struct page structures; this limit comes from
HUGETLB_CGROUP_MIN_ORDER.

All tail pages have the same compound_head value, so we can reuse the
first page of the tail page structs. We map the virtual addresses of the
remaining 6 pages of tail page structs to the first tail page struct and
then free those 6 pages. Therefore, we need to reserve at least 2 pages
as vmemmap areas.

So we introduce a new nr_free_vmemmap_pages field in the hstate to
indicate how many vmemmap pages associated with a HugeTLB page can be
freed to the buddy system.
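
As a concrete example (assuming 4K base pages and a 64-byte
sizeof(struct page), as is common on x86_64): a 2MB HugeTLB page spans
512 base pages, so its struct pages occupy 512 * 64 / 4096 = 8 vmemmap
pages, 6 of which can be freed; a 1GB HugeTLB page spans 262144 base
pages, so its struct pages occupy 4096 vmemmap pages, 4094 of which can
be freed.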

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 include/linux/hugetlb.h |   3 ++
 mm/Makefile             |   1 +
 mm/hugetlb.c            |   3 ++
 mm/hugetlb_vmemmap.c    | 108 ++++++++++++++++++++++++++++++++++++++++++++++++
 mm/hugetlb_vmemmap.h    |  20 +++++++++
 5 files changed, 135 insertions(+)
 create mode 100644 mm/hugetlb_vmemmap.c
 create mode 100644 mm/hugetlb_vmemmap.h
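
For context, the reason all tail page structs can share one physical
vmemmap page is the compound_head encoding: every tail page stores a
pointer to its head page with bit 0 set, so vmemmap pages holding only
tail page structs are identical in content, which is what makes the
remapping described above safe. The snippet below is a simplified
paraphrase of the kernel's compound_head()/set_compound_head() helpers,
for illustration only (not part of this patch):

  static inline void set_compound_head(struct page *page, struct page *head)
  {
  	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
  }

  static inline struct page *compound_head(struct page *page)
  {
  	unsigned long head = READ_ONCE(page->compound_head);

  	if (unlikely(head & 1))
  		return (struct page *)(head - 1);
  	return page;
  }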

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d5cc5f802dd4..eed3dd3bd626 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -492,6 +492,9 @@ struct hstate {
 	unsigned int nr_huge_pages_node[MAX_NUMNODES];
 	unsigned int free_huge_pages_node[MAX_NUMNODES];
 	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+	unsigned int nr_free_vmemmap_pages;
+#endif
 #ifdef CONFIG_CGROUP_HUGETLB
 	/* cgroup control files */
 	struct cftype cgroup_files_dfl[7];
diff --git a/mm/Makefile b/mm/Makefile
index 752111587c99..2a734576bbc0 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_FRONTSWAP)	+= frontswap.o
 obj-$(CONFIG_ZSWAP)	+= zswap.o
 obj-$(CONFIG_HAS_DMA)	+= dmapool.o
 obj-$(CONFIG_HUGETLBFS)	+= hugetlb.o
+obj-$(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP)	+= hugetlb_vmemmap.o
 obj-$(CONFIG_NUMA) 	+= mempolicy.o
 obj-$(CONFIG_SPARSEMEM)	+= sparse.o
 obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 81a41aa080a5..f88032c24667 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -42,6 +42,7 @@
 #include <linux/userfaultfd_k.h>
 #include <linux/page_owner.h>
 #include "internal.h"
+#include "hugetlb_vmemmap.h"
 
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
@@ -3285,6 +3286,8 @@ void __init hugetlb_add_hstate(unsigned int order)
 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
 					huge_page_size(h)/1024);
 
+	hugetlb_vmemmap_init(h);
+
 	parsed_hstate = h;
 }
 
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
new file mode 100644
index 000000000000..a6c9948302e2
--- /dev/null
+++ b/mm/hugetlb_vmemmap.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Free some vmemmap pages of HugeTLB
+ *
+ * Copyright (c) 2020, Bytedance. All rights reserved.
+ *
+ *     Author: Muchun Song <songmuchun@bytedance.com>
+ *
+ * The kernel tracks the status of physical page frames using struct page
+ * structures arranged in one or more arrays, and there is a one-to-one
+ * mapping between each physical page frame and its corresponding struct
+ * page structure.
+ *
+ * HugeTLB support is built on top of the multiple page size support
+ * provided by most modern architectures. For example, x86 CPUs normally
+ * support 4K and 2M (and 1G if architecturally supported) page sizes.
+ * Every HugeTLB page is described by more than one struct page: a 2M
+ * HugeTLB has 512 struct page structures and a 1G HugeTLB has 262144.
+ * However, the HugeTLB core only uses the first 4 struct page structures
+ * (a limit that comes from HUGETLB_CGROUP_MIN_ORDER) to store metadata
+ * associated with each HugeTLB page. The remaining struct page structures
+ * are usually only read through the compound_head field, which holds the
+ * same value in all of them, so freeing some of that struct page memory
+ * to the buddy system can save a lot of memory.
+ *
+ * When the system boots up, every 2M HugeTLB has 512 struct page structures
+ * occupying 8 pages (sizeof(struct page) * 512 / PAGE_SIZE).
+ *
+ *    HugeTLB                  struct pages(8 pages)         page frame(8 pages)
+ * +-----------+ ---virt_to_page---> +-----------+   mapping to   +-----------+
+ * |           |                     |     0     | -------------> |     0     |
+ * |           |                     |     1     | -------------> |     1     |
+ * |           |                     |     2     | -------------> |     2     |
+ * |           |                     |     3     | -------------> |     3     |
+ * |           |                     |     4     | -------------> |     4     |
+ * |     2M    |                     |     5     | -------------> |     5     |
+ * |           |                     |     6     | -------------> |     6     |
+ * |           |                     |     7     | -------------> |     7     |
+ * |           |                     +-----------+                +-----------+
+ * |           |
+ * |           |
+ * +-----------+
+ *
+ *
+ * When a HugeTLB page is preallocated, we can change the mapping from the
+ * one above to the one below.
+ *
+ *    HugeTLB                  struct pages(8 pages)         page frame(8 pages)
+ * +-----------+ ---virt_to_page---> +-----------+   mapping to   +-----------+
+ * |           |                     |     0     | -------------> |     0     |
+ * |           |                     |     1     | -------------> |     1     |
+ * |           |                     |     2     | -------------> +-----------+
+ * |           |                     |     3     | -----------------^ ^ ^ ^ ^
+ * |           |                     |     4     | -------------------+ | | |
+ * |     2M    |                     |     5     | ---------------------+ | |
+ * |           |                     |     6     | -----------------------+ |
+ * |           |                     |     7     | -------------------------+
+ * |           |                     +-----------+
+ * |           |
+ * |           |
+ * +-----------+
+ *
+ * For tail pages, the value of compound_head is the same, so we can reuse
+ * the first page of the tail page structures. We map the virtual addresses
+ * of the remaining 6 pages of tail page structures to the first tail page
+ * structure and then free those 6 page frames. Therefore, we need to
+ * reserve at least 2 pages as vmemmap areas.
+ *
+ * When a HugeTLB page is freed back to the buddy system, we need to allocate
+ * the 6 vmemmap pages again and restore the previous mapping relationship.
+ */
+#define pr_fmt(fmt)	"HugeTLB Vmemmap: " fmt
+
+#include "hugetlb_vmemmap.h"
+
+/*
+ * There are 512 struct page structures (8 pages) associated with each 2MB
+ * hugetlb page. For tail pages, the value of compound_head is the same,
+ * so we can reuse the first page of the tail page structures. We map the
+ * virtual addresses of the remaining 6 pages of tail page structures to
+ * the first tail page struct and then free those 6 pages. Therefore, we
+ * need to reserve at least 2 pages as vmemmap areas.
+ */
+#define RESERVE_VMEMMAP_NR		2U
+
+void __init hugetlb_vmemmap_init(struct hstate *h)
+{
+	unsigned int order = huge_page_order(h);
+	unsigned int vmemmap_pages;
+
+	vmemmap_pages = ((1 << order) * sizeof(struct page)) >> PAGE_SHIFT;
+	/*
+	 * The head page and the first tail page are not to be freed to the
+	 * buddy system; the other pages are remapped to the first tail page.
+	 * So (@vmemmap_pages - RESERVE_VMEMMAP_NR) pages can be freed.
+	 *
+	 * Could RESERVE_VMEMMAP_NR be greater than @vmemmap_pages? This is
+	 * not expected to happen unless the system is corrupted, so the
+	 * check below is only a safety net.
+	 */
+	if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
+		h->nr_free_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;
+	else
+		h->nr_free_vmemmap_pages = 0;
+
+	pr_debug("can free %u vmemmap pages for %s\n", h->nr_free_vmemmap_pages,
+		 h->name);
+}
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
new file mode 100644
index 000000000000..40c0c7dfb60d
--- /dev/null
+++ b/mm/hugetlb_vmemmap.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Free some vmemmap pages of HugeTLB
+ *
+ * Copyright (c) 2020, Bytedance. All rights reserved.
+ *
+ *     Author: Muchun Song <songmuchun@bytedance.com>
+ */
+#ifndef _LINUX_HUGETLB_VMEMMAP_H
+#define _LINUX_HUGETLB_VMEMMAP_H
+#include <linux/hugetlb.h>
+
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+void __init hugetlb_vmemmap_init(struct hstate *h);
+#else
+static inline void hugetlb_vmemmap_init(struct hstate *h)
+{
+}
+#endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
+#endif /* _LINUX_HUGETLB_VMEMMAP_H */
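
For anyone who wants to sanity-check the arithmetic in
hugetlb_vmemmap_init() outside the kernel, here is a minimal standalone
userspace sketch (not part of this patch; the 64-byte struct page size,
PAGE_SHIFT of 12, and the huge page orders are assumptions matching a
typical x86_64 configuration):

  /* vmemmap_calc.c: reproduce the hugetlb_vmemmap_init() arithmetic. */
  #include <stdio.h>

  #define PAGE_SHIFT		12	/* 4K base pages */
  #define STRUCT_PAGE_SIZE	64	/* assumed sizeof(struct page) */
  #define RESERVE_VMEMMAP_NR	2U

  static unsigned int nr_free_vmemmap(unsigned int order)
  {
  	/* Number of pages backing the struct pages of one huge page. */
  	unsigned int vmemmap_pages =
  		((1U << order) * STRUCT_PAGE_SIZE) >> PAGE_SHIFT;

  	return vmemmap_pages > RESERVE_VMEMMAP_NR ?
  	       vmemmap_pages - RESERVE_VMEMMAP_NR : 0;
  }

  int main(void)
  {
  	printf("2MB (order 9):  %u free vmemmap pages\n", nr_free_vmemmap(9));
  	printf("1GB (order 18): %u free vmemmap pages\n", nr_free_vmemmap(18));
  	return 0;	/* prints 6 and 4094 */
  }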
-- 
2.11.0


