[PATCH] arm64: mm: free unused memmap for sparse memory model that defines VMEMMAP
From: Wei Li @ 2020-07-21  7:32 UTC
  To: catalin.marinas, will
  Cc: liwei213, saberlily.xia, puck.chen, butao, fengbaopeng2,
	nsaenzjulienne, steve.capper, rppt, song.bao.hua,
	linux-arm-kernel, linux-kernel, sujunfei2

For memory holes, the sparse memory model with SPARSEMEM_VMEMMAP enabled
does not free the reserved memory for the page map. This patch frees the
unused memmap, as the !SPARSEMEM_VMEMMAP path already does.
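
Concretely, the SPARSEMEM path frees the memmap for a hole in up to two
pieces, clipped to section boundaries. Below is a minimal userspace
sketch of the arithmetic only, not kernel code: show_freed() is an
illustrative helper, PAGES_PER_SECTION = 0x8000 is an assumed value
(the real one depends on SECTION_SIZE_BITS and the page size), and MIN
plus the kernel's ALIGN/ALIGN_DOWN/IS_ALIGNED helpers are open-coded
for power-of-two alignments:

#include <stdio.h>

#define PAGES_PER_SECTION 0x8000UL	/* assumed; depends on config */
#define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))
#define ALIGN(x, a)       ALIGN_DOWN((x) + (a) - 1, (a))
#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)
#define MIN(a, b)         ((a) < (b) ? (a) : (b))

/* Print the pfn ranges whose memmap would be freed for the hole
 * between prev_end and cur_start (both pfns). */
static void show_freed(unsigned long prev_end, unsigned long cur_start)
{
	unsigned long start = MIN(cur_start,
				  ALIGN(prev_end, PAGES_PER_SECTION));

	/* Tail of the previous region's section, or the whole hole. */
	if (prev_end && prev_end < start)
		printf("free [%#lx, %#lx)\n", prev_end, start);

	/* Head of the section the current region starts in. */
	if (cur_start > start && !IS_ALIGNED(cur_start, PAGES_PER_SECTION))
		printf("free [%#lx, %#lx)\n",
		       ALIGN_DOWN(cur_start, PAGES_PER_SECTION), cur_start);
}

int main(void)
{
	show_freed(0x9000, 0xe000);	/* hole within one section */
	show_freed(0x9000, 0x23000);	/* hole spanning sections */
	return 0;
}

For a hole within one section (prev_end 0x9000, cur_start 0xe000) this
prints the single range [0x9000, 0xe000); for a hole spanning sections
it prints the tail of the first section, [0x9000, 0x10000), and the
head of the last, [0x20000, 0x23000). Sections lying entirely inside
the hole are not present, so no memmap was allocated for them and
there is nothing to free.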

Signed-off-by: Wei Li <liwei213@huawei.com>
Signed-off-by: Chen Feng <puck.chen@hisilicon.com>
Signed-off-by: Xia Qing <saberlily.xia@hisilicon.com>
---
 arch/arm64/mm/init.c | 81 +++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 71 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 1e93cfc7c47a..d1b56b47d5ba 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -441,7 +441,48 @@ void __init bootmem_init(void)
 	memblock_dump_all();
 }

-#ifndef CONFIG_SPARSEMEM_VMEMMAP
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#define VMEMMAP_PAGE_INUSE 0xFD
+static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+	unsigned long addr, end;
+	unsigned long next;
+	pmd_t *pmd;
+	void *page_addr;
+	phys_addr_t phys_addr;
+
+	addr = (unsigned long)pfn_to_page(start_pfn);
+	end = (unsigned long)pfn_to_page(end_pfn);
+
+	pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);
+	for (; addr < end; addr = next, pmd++) {
+		next = pmd_addr_end(addr, end);
+
+		if (!pmd_present(*pmd))
+			continue;
+
+		if (IS_ALIGNED(addr, PMD_SIZE) &&
+			IS_ALIGNED(next, PMD_SIZE)) {
+			phys_addr = __pfn_to_phys(pmd_pfn(*pmd));
+			free_bootmem(phys_addr, PMD_SIZE);
+			pmd_clear(pmd);
+		} else {
+			/* Partly used PMD: mark the freed range; free the page only when fully marked. */
+			memset((void *)addr, VMEMMAP_PAGE_INUSE, next - addr);
+			page_addr = page_address(pmd_page(*pmd));
+
+			if (!memchr_inv(page_addr, VMEMMAP_PAGE_INUSE,
+				PMD_SIZE)) {
+				phys_addr = __pfn_to_phys(pmd_pfn(*pmd));
+				free_bootmem(phys_addr, PMD_SIZE);
+				pmd_clear(pmd);
+			}
+		}
+	}
+
+	flush_tlb_all();
+}
+#else
 static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct page *start_pg, *end_pg;
@@ -468,31 +509,53 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 		memblock_free(pg, pgend - pg);
 }

+#endif
+
 /*
  * The mem_map array can get very big. Free the unused area of the memory map.
  */
 static void __init free_unused_memmap(void)
 {
-	unsigned long start, prev_end = 0;
+	unsigned long start, cur_start, prev_end = 0;
 	struct memblock_region *reg;

 	for_each_memblock(memory, reg) {
-		start = __phys_to_pfn(reg->base);
+		cur_start = __phys_to_pfn(reg->base);

 #ifdef CONFIG_SPARSEMEM
 		/*
 		 * Take care not to free memmap entries that don't exist due
 		 * to SPARSEMEM sections which aren't present.
 		 */
-		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
+		start = min(cur_start, ALIGN(prev_end, PAGES_PER_SECTION));
+
 		/*
-		 * If we had a previous bank, and there is a space between the
-		 * current bank and the previous, free it.
+		 * Free memory in the case of:
+		 * 1. if cur_start - prev_end <= PAGES_PER_SECTION,
+		 * free prev_end ~ cur_start.
+		 * 2. if cur_start - prev_end > PAGES_PER_SECTION,
+		 * free prev_end ~ ALIGN(prev_end, PAGES_PER_SECTION).
 		 */
 		if (prev_end && prev_end < start)
 			free_memmap(prev_end, start);

+		/*
+		 * Free memory in the case of:
+		 * if cur_start - prev_end > PAGES_PER_SECTION,
+		 * free ALIGN_DOWN(cur_start, PAGES_PER_SECTION) ~ cur_start.
+		 */
+		if (cur_start > start &&
+		    !IS_ALIGNED(cur_start, PAGES_PER_SECTION))
+			free_memmap(ALIGN_DOWN(cur_start, PAGES_PER_SECTION),
+				    cur_start);
+#else
+		/*
+		 * If we had a previous bank, and there is a space between the
+		 * current bank and the previous, free it.
+		 */
+		if (prev_end && prev_end < cur_start)
+			free_memmap(prev_end, cur_start);
+#endif
 		/*
 		 * Align up here since the VM subsystem insists that the
 		 * memmap entries are valid from the bank end aligned to
@@ -507,7 +570,6 @@ static void __init free_unused_memmap(void)
 		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
 #endif
 }
-#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */

 /*
  * mem_init() marks the free areas in the mem_map and tells us how much memory
@@ -524,9 +586,8 @@ void __init mem_init(void)

 	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);

-#ifndef CONFIG_SPARSEMEM_VMEMMAP
 	free_unused_memmap();
-#endif
+
 	/* this will put all unused low memory onto the freelists */
 	memblock_free_all();

--
2.15.0
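
The partial-PMD case in the VMEMMAP free_memmap() above relies on a
marker byte: each freed sub-range of the vmemmap is filled with
VMEMMAP_PAGE_INUSE, and the backing PMD-sized page is only handed back
once memchr_inv() confirms every byte in it carries the marker (the
same pattern x86's remove_pmd_table() uses with its PAGE_INUSE poison).
A minimal userspace sketch of that test, with memchr_inv() open-coded
since it is a kernel-internal helper, and a 4 KiB buffer standing in
for the 2 MiB PMD page:

#include <stdio.h>
#include <string.h>

#define PAGE_MARKER 0xFD   /* mirrors VMEMMAP_PAGE_INUSE above */
#define PMD_PAGE    4096   /* stand-in; a real arm64 PMD page is 2 MiB */

/* Open-coded equivalent of the kernel's memchr_inv(): return NULL if
 * all len bytes equal c, otherwise point at the first mismatch. */
static const void *memchr_inv_sketch(const void *p, int c, size_t len)
{
	const unsigned char *s = p;
	size_t i;

	for (i = 0; i < len; i++)
		if (s[i] != (unsigned char)c)
			return s + i;
	return NULL;
}

int main(void)
{
	static unsigned char page[PMD_PAGE];	/* zeroed, i.e. "in use" */

	/* Free the first half of the range: mark it, but keep the page. */
	memset(page, PAGE_MARKER, PMD_PAGE / 2);
	printf("half marked: %s\n",
	       memchr_inv_sketch(page, PAGE_MARKER, PMD_PAGE) ?
	       "keep page" : "free page");

	/* Free the second half too: now the whole page can go back. */
	memset(page + PMD_PAGE / 2, PAGE_MARKER, PMD_PAGE / 2);
	printf("all marked:  %s\n",
	       memchr_inv_sketch(page, PAGE_MARKER, PMD_PAGE) ?
	       "keep page" : "free page");
	return 0;
}

The first check reports "keep page" because the unmarked half may still
back live struct pages; only after the second memset is the whole
stand-in page eligible to be freed.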

