From: Muchun Song <songmuchun@bytedance.com>
To: corbet@lwn.net, mike.kravetz@oracle.com, tglx@linutronix.de,
	mingo@redhat.com, bp@alien8.de, x86@kernel.org, hpa@zytor.com,
	dave.hansen@linux.intel.com, luto@kernel.org,
	peterz@infradead.org, viro@zeniv.linux.org.uk,
	akpm@linux-foundation.org, paulmck@kernel.org,
	mchehab+huawei@kernel.org, pawan.kumar.gupta@linux.intel.com,
	rdunlap@infradead.org, oneukum@suse.com,
	anshuman.khandual@arm.com, jroedel@suse.de,
	almasrymina@google.com, rientjes@google.com, willy@infradead.org,
	osalvador@suse.de, mhocko@suse.com, song.bao.hua@hisilicon.com
Cc: duanxiongchun@bytedance.com, linux-doc@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	linux-fsdevel@vger.kernel.org,
	Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v5 09/21] mm/hugetlb: Free the vmemmap pages associated with each hugetlb page
Date: Fri, 20 Nov 2020 14:43:13 +0800
Message-ID: <20201120064325.34492-10-songmuchun@bytedance.com>
In-Reply-To: <20201120064325.34492-1-songmuchun@bytedance.com>

When we allocate a HugeTLB page from the buddy allocator, we should free
the unused vmemmap pages associated with it. We can do that in
prep_new_huge_page(): all vmemmap pages beyond the reserved head area are
remapped read-only onto a single reused page frame, and the page frames
that previously backed them are freed.
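
To make the saving concrete, here is a userspace sketch (not kernel code;
it only redoes the arithmetic that hugetlb_vmemmap_init() performs),
assuming the x86-64 defaults of 4 KiB base pages and a 64-byte struct page:

  #include <stdio.h>

  int main(void)
  {
          const unsigned long page_size   = 4096; /* 4 KiB base page */
          const unsigned long struct_page = 64;   /* sizeof(struct page) */
          const unsigned long reserve     = 2;    /* RESERVE_VMEMMAP_NR */
          const unsigned long hpages[]    = { 2UL << 20, 1UL << 30 };

          for (int i = 0; i < 2; i++) {
                  /* struct pages per hugepage, then vmemmap pages needed */
                  unsigned long vmemmap = hpages[i] / page_size *
                                          struct_page / page_size;

                  printf("%4lu MiB HugeTLB page: %4lu vmemmap pages, %4lu freeable\n",
                         hpages[i] >> 20, vmemmap, vmemmap - reserve);
          }
          return 0;
  }

In other words, each 2 MiB HugeTLB page frees 6 of its 8 vmemmap pages
(24 KiB), and each 1 GiB page frees 4094 of 4096 (almost 16 MiB).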

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 arch/x86/include/asm/hugetlb.h          |   9 ++
 arch/x86/include/asm/pgtable_64_types.h |   8 ++
 mm/hugetlb.c                            |  16 +++
 mm/hugetlb_vmemmap.c                    | 188 ++++++++++++++++++++++++++++++++
 mm/hugetlb_vmemmap.h                    |   5 +
 5 files changed, 226 insertions(+)
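
As a reading aid for the diff below: free_huge_page_vmemmap() splits the
huge PMD mapping the vmemmap area when necessary, then
__free_huge_page_pte_vmemmap() points every PTE past the
RESERVE_VMEMMAP_SIZE boundary at the page frame mapped just before that
boundary (ptep[TAIL_PAGE_REUSE]) and collects the old frames for freeing.
A rough userspace model of the remap step (illustrative only; the flat
pte[] array stands in for the real page table):

  #include <stdio.h>

  #define NR_VMEMMAP_PAGES        8       /* for a 2 MiB HugeTLB page */
  #define RESERVE_VMEMMAP_NR      2       /* entries that keep their frames */

  int main(void)
  {
          int pte[NR_VMEMMAP_PAGES];      /* fake PTEs: frame each entry maps */
          int nr_freed = 0;

          for (int i = 0; i < NR_VMEMMAP_PAGES; i++)
                  pte[i] = i;             /* initially mapped 1:1 */

          /*
           * As in __free_huge_page_pte_vmemmap(): point every entry past
           * the reserved ones at the frame mapped just before the range
           * (ptep[TAIL_PAGE_REUSE], i.e. index RESERVE_VMEMMAP_NR - 1)
           * and count its old frame as freed.
           */
          int reuse = pte[RESERVE_VMEMMAP_NR - 1];

          for (int i = RESERVE_VMEMMAP_NR; i < NR_VMEMMAP_PAGES; i++) {
                  nr_freed++;             /* old frame pte[i] is freed */
                  pte[i] = reuse;         /* mapped read-only in the kernel */
          }

          printf("freed %d of %d vmemmap page frames\n",
                 nr_freed, NR_VMEMMAP_PAGES);
          return 0;
  }

Mapping the reused frame read-only means that a stray write through a
remapped tail struct page faults instead of silently corrupting the
shared frame.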

diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 1721b1aadeb1..c601fe042832 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -4,6 +4,15 @@
 
 #include <asm/page.h>
 #include <asm-generic/hugetlb.h>
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+#define vmemmap_pmd_huge vmemmap_pmd_huge
+static inline bool vmemmap_pmd_huge(pmd_t *pmd)
+{
+	return pmd_large(*pmd);
+}
+#endif
 
 #define hugepages_supported() boot_cpu_has(X86_FEATURE_PSE)
 
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 52e5f5f2240d..bedbd2e7d06c 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -139,6 +139,14 @@ extern unsigned int ptrs_per_p4d;
 # define VMEMMAP_START		__VMEMMAP_BASE_L4
 #endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
 
+/*
+ * VMEMMAP_SIZE - allows the whole linear region to be covered by
+ *                a struct page array.
+ */
+#define VMEMMAP_SIZE		(1UL << (__VIRTUAL_MASK_SHIFT - PAGE_SHIFT - \
+					 1 + ilog2(sizeof(struct page))))
+#define VMEMMAP_END		(VMEMMAP_START + VMEMMAP_SIZE)
+
 #define VMALLOC_END		(VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1)
 
 #define MODULES_VADDR		(__START_KERNEL_map + KERNEL_IMAGE_SIZE)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f88032c24667..a0ce6f33a717 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1499,6 +1499,14 @@ void free_huge_page(struct page *page)
 
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
+	free_huge_page_vmemmap(h, page);
+	/*
+	 * Because we store preallocated pages on @page->lru,
+	 * vmemmap_pgtable_free() must be called before the
+	 * initialization of @page->lru in INIT_LIST_HEAD().
+	 */
+	vmemmap_pgtable_free(page);
+
 	INIT_LIST_HEAD(&page->lru);
 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
 	set_hugetlb_cgroup(page, NULL);
@@ -1751,6 +1759,14 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
 	if (!page)
 		return NULL;
 
+	if (vmemmap_pgtable_prealloc(h, page)) {
+		if (hstate_is_gigantic(h))
+			free_gigantic_page(page, huge_page_order(h));
+		else
+			put_page(page);
+		return NULL;
+	}
+
 	if (hstate_is_gigantic(h))
 		prep_compound_gigantic_page(page, huge_page_order(h));
 	prep_new_huge_page(h, page, page_to_nid(page));
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index bc8546df4a51..6f8a735e0dd3 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -102,6 +102,7 @@
 #include <linux/pagewalk.h>
 #include <linux/mmzone.h>
 #include <linux/list.h>
+#include <linux/bootmem_info.h>
 #include <asm/pgalloc.h>
 #include "hugetlb_vmemmap.h"
 
@@ -114,6 +115,8 @@
  * these page frames. Therefore, we need to reserve two pages as vmemmap areas.
  */
 #define RESERVE_VMEMMAP_NR		2U
+#define RESERVE_VMEMMAP_SIZE		(RESERVE_VMEMMAP_NR << PAGE_SHIFT)
+#define TAIL_PAGE_REUSE			-1
 
 #ifndef VMEMMAP_HPAGE_SHIFT
 #define VMEMMAP_HPAGE_SHIFT		HPAGE_SHIFT
@@ -123,6 +126,21 @@
 #define VMEMMAP_HPAGE_SIZE		((1UL) << VMEMMAP_HPAGE_SHIFT)
 #define VMEMMAP_HPAGE_MASK		(~(VMEMMAP_HPAGE_SIZE - 1))
 
+#define vmemmap_hpage_addr_end(addr, end)				 \
+({									 \
+	unsigned long __boundary;					 \
+	__boundary = ((addr) + VMEMMAP_HPAGE_SIZE) & VMEMMAP_HPAGE_MASK; \
+	(__boundary - 1 < (end) - 1) ? __boundary : (end);		 \
+})
+
+#ifndef vmemmap_pmd_huge
+#define vmemmap_pmd_huge vmemmap_pmd_huge
+static inline bool vmemmap_pmd_huge(pmd_t *pmd)
+{
+	return pmd_huge(*pmd);
+}
+#endif
+
 static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
 {
 	return h->nr_free_vmemmap_pages;
@@ -189,6 +207,176 @@ int vmemmap_pgtable_prealloc(struct hstate *h, struct page *page)
 	return -ENOMEM;
 }
 
+/*
+ * Walk a vmemmap address to the pmd entry that maps it.
+ */
+static pmd_t *vmemmap_to_pmd(unsigned long page)
+{
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	if (page < VMEMMAP_START || page >= VMEMMAP_END)
+		return NULL;
+
+	pgd = pgd_offset_k(page);
+	if (pgd_none(*pgd))
+		return NULL;
+	p4d = p4d_offset(pgd, page);
+	if (p4d_none(*p4d))
+		return NULL;
+	pud = pud_offset(p4d, page);
+
+	if (pud_none(*pud) || pud_bad(*pud))
+		return NULL;
+	pmd = pmd_offset(pud, page);
+
+	return pmd;
+}
+
+static inline spinlock_t *vmemmap_pmd_lock(pmd_t *pmd)
+{
+	return pmd_lock(&init_mm, pmd);
+}
+
+static inline int freed_vmemmap_hpage(struct page *page)
+{
+	return atomic_read(&page->_mapcount) + 1;
+}
+
+static inline int freed_vmemmap_hpage_inc(struct page *page)
+{
+	return atomic_inc_return_relaxed(&page->_mapcount) + 1;
+}
+
+static inline int freed_vmemmap_hpage_dec(struct page *page)
+{
+	return atomic_dec_return_relaxed(&page->_mapcount) + 1;
+}
+
+static inline void free_vmemmap_page_list(struct list_head *list)
+{
+	struct page *page, *next;
+
+	list_for_each_entry_safe(page, next, list, lru) {
+		list_del(&page->lru);
+		free_vmemmap_page(page);
+	}
+}
+
+static void __free_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
+					 unsigned long start,
+					 unsigned long end,
+					 struct list_head *free_pages)
+{
+	/* Make sure that the tail pages are mapped read-only. */
+	pgprot_t pgprot = PAGE_KERNEL_RO;
+	pte_t entry = mk_pte(reuse, pgprot);
+	unsigned long addr;
+
+	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
+		struct page *page;
+		pte_t old = *ptep;
+
+		VM_WARN_ON(!pte_present(old));
+		page = pte_page(old);
+		list_add(&page->lru, free_pages);
+
+		set_pte_at(&init_mm, addr, ptep, entry);
+	}
+}
+
+static void __free_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
+					 unsigned long addr,
+					 struct list_head *free_pages)
+{
+	unsigned long next;
+	unsigned long start = addr + RESERVE_VMEMMAP_SIZE;
+	unsigned long end = addr + vmemmap_pages_size_per_hpage(h);
+	struct page *reuse = NULL;
+
+	addr = start;
+	do {
+		pte_t *ptep;
+
+		ptep = pte_offset_kernel(pmd, addr);
+		if (!reuse)
+			reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
+
+		next = vmemmap_hpage_addr_end(addr, end);
+		__free_huge_page_pte_vmemmap(reuse, ptep, addr, next,
+					     free_pages);
+	} while (pmd++, addr = next, addr != end);
+
+	flush_tlb_kernel_range(start, end);
+}
+
+static void split_vmemmap_pmd(pmd_t *pmd, pte_t *pte_p, unsigned long addr)
+{
+	int i;
+	pgprot_t pgprot = PAGE_KERNEL;
+	struct mm_struct *mm = &init_mm;
+	struct page *page;
+	pmd_t old_pmd, _pmd;
+
+	old_pmd = READ_ONCE(*pmd);
+	page = pmd_page(old_pmd);
+	pmd_populate_kernel(mm, &_pmd, pte_p);
+
+	for (i = 0; i < VMEMMAP_HPAGE_NR; i++, addr += PAGE_SIZE) {
+		pte_t entry, *pte;
+
+		entry = mk_pte(page + i, pgprot);
+		pte = pte_offset_kernel(&_pmd, addr);
+		VM_BUG_ON(!pte_none(*pte));
+		set_pte_at(mm, addr, pte, entry);
+	}
+
+	/* make pte visible before pmd */
+	smp_wmb();
+	pmd_populate_kernel(mm, pmd, pte_p);
+}
+
+static void split_vmemmap_huge_page(struct page *head, pmd_t *pmd)
+{
+	struct page *pte_page, *t_page;
+	unsigned long start = (unsigned long)head & VMEMMAP_HPAGE_MASK;
+	unsigned long addr = start;
+
+	list_for_each_entry_safe(pte_page, t_page, &head->lru, lru) {
+		list_del(&pte_page->lru);
+		VM_BUG_ON(freed_vmemmap_hpage(pte_page));
+		split_vmemmap_pmd(pmd++, page_to_virt(pte_page), addr);
+		addr += VMEMMAP_HPAGE_SIZE;
+	}
+
+	flush_tlb_kernel_range(start, addr);
+}
+
+void free_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+	pmd_t *pmd;
+	spinlock_t *ptl;
+	LIST_HEAD(free_pages);
+
+	if (!free_vmemmap_pages_per_hpage(h))
+		return;
+
+	pmd = vmemmap_to_pmd((unsigned long)head);
+	BUG_ON(!pmd);
+
+	ptl = vmemmap_pmd_lock(pmd);
+	if (vmemmap_pmd_huge(pmd))
+		split_vmemmap_huge_page(head, pmd);
+
+	__free_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head, &free_pages);
+	freed_vmemmap_hpage_inc(pmd_page(*pmd));
+	spin_unlock(ptl);
+
+	free_vmemmap_page_list(&free_pages);
+}
+
 void __init hugetlb_vmemmap_init(struct hstate *h)
 {
 	unsigned int order = huge_page_order(h);
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 9eca6879c0a4..a9425d94ed8b 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -14,6 +14,7 @@
 void __init hugetlb_vmemmap_init(struct hstate *h);
 int vmemmap_pgtable_prealloc(struct hstate *h, struct page *page);
 void vmemmap_pgtable_free(struct page *page);
+void free_huge_page_vmemmap(struct hstate *h, struct page *head);
 #else
 static inline void hugetlb_vmemmap_init(struct hstate *h)
 {
@@ -27,5 +28,9 @@ static inline int vmemmap_pgtable_prealloc(struct hstate *h, struct page *page)
 static inline void vmemmap_pgtable_free(struct page *page)
 {
 }
+
+static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+}
 #endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
 #endif /* _LINUX_HUGETLB_VMEMMAP_H */
-- 
2.11.0

