From: Muchun Song <songmuchun@bytedance.com>
To: corbet@lwn.net, mike.kravetz@oracle.com, tglx@linutronix.de,
	mingo@redhat.com, bp@alien8.de, x86@kernel.org, hpa@zytor.com,
	dave.hansen@linux.intel.com, luto@kernel.org,
	peterz@infradead.org, viro@zeniv.linux.org.uk,
	akpm@linux-foundation.org, paulmck@kernel.org,
	mchehab+huawei@kernel.org, pawan.kumar.gupta@linux.intel.com,
	rdunlap@infradead.org, oneukum@suse.com,
	anshuman.khandual@arm.com, jroedel@suse.de,
	almasrymina@google.com, rientjes@google.com, willy@infradead.org,
	osalvador@suse.de, mhocko@suse.com, song.bao.hua@hisilicon.com
Cc: duanxiongchun@bytedance.com, linux-doc@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	linux-fsdevel@vger.kernel.org,
	Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v5 12/21] mm/hugetlb: Introduce remap_huge_page_pmd_vmemmap helper
Date: Fri, 20 Nov 2020 14:43:16 +0800
Message-ID: <20201120064325.34492-13-songmuchun@bytedance.com>
In-Reply-To: <20201120064325.34492-1-songmuchun@bytedance.com>

__free_huge_page_pmd_vmemmap() and __remap_huge_page_pmd_vmemmap() are
almost identical. Introduce a common remap_huge_page_pmd_vmemmap()
helper that takes the per-range operation as a callback, so the
duplicated PMD-walking code can be removed.
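
The helper passes the per-range operation as a function pointer
(remap_pte_fn). Purely to illustrate the shape of that refactoring,
below is a standalone userspace sketch (not kernel code; walk_range,
visit_fn and the visit_* callbacks are invented for this example):

  #include <stdio.h>

  /* Callback type: the operation applied to each sub-range. */
  typedef void (*visit_fn)(unsigned long start, unsigned long end);

  /* One shared walker replaces two nearly identical loops. */
  static void walk_range(unsigned long start, unsigned long end,
  		       unsigned long step, visit_fn visit)
  {
  	unsigned long addr = start;

  	do {
  		unsigned long next = addr + step < end ? addr + step : end;

  		visit(addr, next);
  		addr = next;
  	} while (addr != end);
  }

  static void visit_free(unsigned long start, unsigned long end)
  {
  	printf("free  [%#lx, %#lx)\n", start, end);
  }

  static void visit_remap(unsigned long start, unsigned long end)
  {
  	printf("remap [%#lx, %#lx)\n", start, end);
  }

  int main(void)
  {
  	/* The same walker serves both paths; only the callback differs. */
  	walk_range(0x1000, 0x5000, 0x1000, visit_free);
  	walk_range(0x1000, 0x5000, 0x1000, visit_remap);
  	return 0;
  }

In the patch itself the walker additionally carries the hstate, pmd and
reuse-page state and flushes the cache/TLB around the loop; only the
callback (__free_huge_page_pte_vmemmap or __remap_huge_page_pte_vmemmap)
differs between the two call sites.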

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 mm/hugetlb_vmemmap.c | 108 +++++++++++++++++++++------------------------------
 1 file changed, 45 insertions(+), 63 deletions(-)

diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 361c4174e222..06e2b8a7b7c8 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -252,6 +252,47 @@ static inline int freed_vmemmap_hpage_dec(struct page *page)
 	return atomic_dec_return_relaxed(&page->_mapcount) + 1;
 }
 
+static inline void free_vmemmap_page_list(struct list_head *list)
+{
+	struct page *page, *next;
+
+	list_for_each_entry_safe(page, next, list, lru) {
+		list_del(&page->lru);
+		free_vmemmap_page(page);
+	}
+}
+
+typedef void (*remap_pte_fn)(struct page *reuse, pte_t *ptep,
+			     unsigned long start, unsigned long end,
+			     struct list_head *pages);
+
+static void remap_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
+					unsigned long addr,
+					struct list_head *pages,
+					remap_pte_fn remap_fn)
+{
+	unsigned long next;
+	unsigned long start = addr + RESERVE_VMEMMAP_SIZE;
+	unsigned long end = addr + vmemmap_pages_size_per_hpage(h);
+	struct page *reuse = NULL;
+
+	flush_cache_vunmap(start, end);
+
+	addr = start;
+	do {
+		pte_t *ptep;
+
+		ptep = pte_offset_kernel(pmd, addr);
+		if (!reuse)
+			reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
+
+		next = vmemmap_hpage_addr_end(addr, end);
+		remap_fn(reuse, ptep, addr, next, pages);
+	} while (pmd++, addr = next, addr != end);
+
+	flush_tlb_kernel_range(start, end);
+}
+
 static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
 					  unsigned long start,
 					  unsigned long end,
@@ -286,31 +327,6 @@ static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
 	}
 }
 
-static void __remap_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
-					  unsigned long addr,
-					  struct list_head *remap_pages)
-{
-	unsigned long next;
-	unsigned long start = addr + RESERVE_VMEMMAP_SIZE;
-	unsigned long end = addr + vmemmap_pages_size_per_hpage(h);
-	struct page *reuse = NULL;
-
-	addr = start;
-	do {
-		pte_t *ptep;
-
-		ptep = pte_offset_kernel(pmd, addr);
-		if (!reuse)
-			reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
-
-		next = vmemmap_hpage_addr_end(addr, end);
-		__remap_huge_page_pte_vmemmap(reuse, ptep, addr, next,
-					      remap_pages);
-	} while (pmd++, addr = next, addr != end);
-
-	flush_tlb_kernel_range(start, end);
-}
-
 static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
 {
 	int i;
@@ -339,8 +355,8 @@ void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
 	BUG_ON(!pmd);
 
 	ptl = vmemmap_pmd_lock(pmd);
-	__remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head,
-				      &remap_pages);
+	remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head, &remap_pages,
+				    __remap_huge_page_pte_vmemmap);
 	if (!freed_vmemmap_hpage_dec(pmd_page(*pmd))) {
 		/*
 		 * Todo:
@@ -350,16 +366,6 @@ void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
 	spin_unlock(ptl);
 }
 
-static inline void free_vmemmap_page_list(struct list_head *list)
-{
-	struct page *page, *next;
-
-	list_for_each_entry_safe(page, next, list, lru) {
-		list_del(&page->lru);
-		free_vmemmap_page(page);
-	}
-}
-
 static void __free_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
 					 unsigned long start,
 					 unsigned long end,
@@ -382,31 +388,6 @@ static void __free_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
 	}
 }
 
-static void __free_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
-					 unsigned long addr,
-					 struct list_head *free_pages)
-{
-	unsigned long next;
-	unsigned long start = addr + RESERVE_VMEMMAP_SIZE;
-	unsigned long end = addr + vmemmap_pages_size_per_hpage(h);
-	struct page *reuse = NULL;
-
-	addr = start;
-	do {
-		pte_t *ptep;
-
-		ptep = pte_offset_kernel(pmd, addr);
-		if (!reuse)
-			reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
-
-		next = vmemmap_hpage_addr_end(addr, end);
-		__free_huge_page_pte_vmemmap(reuse, ptep, addr, next,
-					     free_pages);
-	} while (pmd++, addr = next, addr != end);
-
-	flush_tlb_kernel_range(start, end);
-}
-
 static void split_vmemmap_pmd(pmd_t *pmd, pte_t *pte_p, unsigned long addr)
 {
 	int i;
@@ -465,7 +446,8 @@ void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 	if (vmemmap_pmd_huge(pmd))
 		split_vmemmap_huge_page(head, pmd);
 
-	__free_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head, &free_pages);
+	remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head, &free_pages,
+				    __free_huge_page_pte_vmemmap);
 	freed_vmemmap_hpage_inc(pmd_page(*pmd));
 	spin_unlock(ptl);
 
-- 
2.11.0

