From: Muchun Song <songmuchun@bytedance.com>
To: corbet@lwn.net, mike.kravetz@oracle.com, tglx@linutronix.de,
	mingo@redhat.com, bp@alien8.de, x86@kernel.org, hpa@zytor.com,
	dave.hansen@linux.intel.com, luto@kernel.org,
	peterz@infradead.org, viro@zeniv.linux.org.uk,
	akpm@linux-foundation.org, paulmck@kernel.org,
	mchehab+huawei@kernel.org, pawan.kumar.gupta@linux.intel.com,
	rdunlap@infradead.org, oneukum@suse.com,
	anshuman.khandual@arm.com, jroedel@suse.de,
	almasrymina@google.com, rientjes@google.com, willy@infradead.org,
	osalvador@suse.de, mhocko@suse.com, song.bao.hua@hisilicon.com
Cc: duanxiongchun@bytedance.com, linux-doc@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	linux-fsdevel@vger.kernel.org,
	Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v5 11/21] mm/hugetlb: Allocate the vmemmap pages associated with each hugetlb page
Date: Fri, 20 Nov 2020 14:43:15 +0800
Message-ID: <20201120064325.34492-12-songmuchun@bytedance.com>
In-Reply-To: <20201120064325.34492-1-songmuchun@bytedance.com>

When we free a hugetlb page back to the buddy allocator, we have to
reallocate the vmemmap pages that were freed for it earlier and remap them.
We can do that in __free_hugepage().
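
For reference, the allocation step boils down to a short loop; the sketch
below is simplified from the mm/hugetlb_vmemmap.c hunk in this patch and
uses a renamed helper purely for illustration (headers and context as in
mm/hugetlb_vmemmap.c):

  /*
   * Illustrative sketch, not the patch code itself (the function is
   * renamed to avoid confusion with the real alloc_vmemmap_pages()
   * below): allocate one page for every vmemmap page that was freed
   * earlier and queue it for the remap step. __GFP_NOFAIL keeps the
   * caller simple at the cost of possibly blocking for a long time
   * under heavy memory pressure.
   */
  static void alloc_vmemmap_pages_sketch(struct hstate *h,
                                         struct list_head *list)
  {
          int i;

          for (i = 0; i < free_vmemmap_pages_per_hpage(h); i++) {
                  struct page *page = alloc_page(GFP_KERNEL |
                                                 __GFP_NOFAIL |
                                                 __GFP_MEMALLOC);

                  /* Queue the page for __remap_huge_page_pte_vmemmap(). */
                  list_add_tail(&page->lru, list);
          }
  }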

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 mm/hugetlb.c         |   2 ++
 mm/hugetlb_vmemmap.c | 100 +++++++++++++++++++++++++++++++++++++++++++++++++++
 mm/hugetlb_vmemmap.h |   5 +++
 3 files changed, 107 insertions(+)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4aabf12aca9b..ba927ae7f9bd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1382,6 +1382,8 @@ static void __free_hugepage(struct hstate *h, struct page *page)
 {
 	int i;
 
+	alloc_huge_page_vmemmap(h, page);
+
 	for (i = 0; i < pages_per_huge_page(h); i++) {
 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
 				1 << PG_referenced | 1 << PG_dirty |
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index eda7e3a0b67c..361c4174e222 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -117,6 +117,8 @@
 #define RESERVE_VMEMMAP_NR		2U
 #define RESERVE_VMEMMAP_SIZE		(RESERVE_VMEMMAP_NR << PAGE_SHIFT)
 #define TAIL_PAGE_REUSE			-1
+#define GFP_VMEMMAP_PAGE		\
+	(GFP_KERNEL | __GFP_NOFAIL | __GFP_MEMALLOC)
 
 #ifndef VMEMMAP_HPAGE_SHIFT
 #define VMEMMAP_HPAGE_SHIFT		HPAGE_SHIFT
@@ -250,6 +252,104 @@ static inline int freed_vmemmap_hpage_dec(struct page *page)
 	return atomic_dec_return_relaxed(&page->_mapcount) + 1;
 }
 
+static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
+					  unsigned long start,
+					  unsigned long end,
+					  struct list_head *remap_pages)
+{
+	pgprot_t pgprot = PAGE_KERNEL;
+	void *from = page_to_virt(reuse);
+	unsigned long addr;
+
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		void *to;
+		struct page *page;
+		pte_t entry, old = *ptep;
+
+		page = list_first_entry_or_null(remap_pages, struct page, lru);
+		list_del(&page->lru);
+		to = page_to_virt(page);
+		copy_page(to, from);
+
+		/*
+		 * Make sure that any data written to @to is made
+		 * visible to the physical page.
+		 */
+		flush_kernel_vmap_range(to, PAGE_SIZE);
+
+		prepare_vmemmap_page(page);
+
+		entry = mk_pte(page, pgprot);
+		set_pte_at(&init_mm, addr, ptep++, entry);
+
+		VM_BUG_ON(!pte_present(old) || pte_page(old) != reuse);
+	}
+}
+
+static void __remap_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
+					  unsigned long addr,
+					  struct list_head *remap_pages)
+{
+	unsigned long next;
+	unsigned long start = addr + RESERVE_VMEMMAP_SIZE;
+	unsigned long end = addr + vmemmap_pages_size_per_hpage(h);
+	struct page *reuse = NULL;
+
+	addr = start;
+	do {
+		pte_t *ptep;
+
+		ptep = pte_offset_kernel(pmd, addr);
+		if (!reuse)
+			reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
+
+		next = vmemmap_hpage_addr_end(addr, end);
+		__remap_huge_page_pte_vmemmap(reuse, ptep, addr, next,
+					      remap_pages);
+	} while (pmd++, addr = next, addr != end);
+
+	flush_tlb_kernel_range(start, end);
+}
+
+static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
+{
+	int i;
+
+	for (i = 0; i < free_vmemmap_pages_per_hpage(h); i++) {
+		struct page *page;
+
+		/* This should not fail */
+		page = alloc_page(GFP_VMEMMAP_PAGE);
+		list_add_tail(&page->lru, list);
+	}
+}
+
+void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+	pmd_t *pmd;
+	spinlock_t *ptl;
+	LIST_HEAD(remap_pages);
+
+	if (!free_vmemmap_pages_per_hpage(h))
+		return;
+
+	alloc_vmemmap_pages(h, &remap_pages);
+
+	pmd = vmemmap_to_pmd((unsigned long)head);
+	BUG_ON(!pmd);
+
+	ptl = vmemmap_pmd_lock(pmd);
+	__remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head,
+				      &remap_pages);
+	if (!freed_vmemmap_hpage_dec(pmd_page(*pmd))) {
+		/*
+		 * TODO:
+		 * Merge the PTEs back into a huge PMD if it was ever split.
+		 */
+	}
+	spin_unlock(ptl);
+}
+
 static inline void free_vmemmap_page_list(struct list_head *list)
 {
 	struct page *page, *next;
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 4175b44f88bc..6dfa7ed6f88a 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -14,6 +14,7 @@
 void __init hugetlb_vmemmap_init(struct hstate *h);
 int vmemmap_pgtable_prealloc(struct hstate *h, struct page *page);
 void vmemmap_pgtable_free(struct page *page);
+void alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
 void free_huge_page_vmemmap(struct hstate *h, struct page *head);
 
 static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
@@ -34,6 +35,10 @@ static inline void vmemmap_pgtable_free(struct page *page)
 {
 }
 
+static inline void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+}
+
 static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 }
-- 
2.11.0

