From: wency@cn.fujitsu.com
To: x86@kernel.org, linux-mm@kvack.org, linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, linux-acpi@vger.kernel.org, linux-s390@vger.kernel.org, linux-sh@vger.kernel.org, linux-ia64@vger.kernel.org, cmetcalf@tilera.com, sparclinux@vger.kernel.org
Cc: rientjes@google.com, liuj97@gmail.com, len.brown@intel.com, benh@kernel.crashing.org, paulus@samba.org, cl@linux.com, minchan.kim@gmail.com, akpm@linux-foundation.org, kosaki.motohiro@jp.fujitsu.com, isimatu.yasuaki@jp.fujitsu.com, Wen Congyang
Subject: [RFC v8 PATCH 15/20] memory-hotplug: implement register_page_bootmem_info_section of sparse-vmemmap
Date: Tue, 28 Aug 2012 18:00:22 +0800
Message-Id: <1346148027-24468-16-git-send-email-wency@cn.fujitsu.com>
X-Mailer: git-send-email 1.7.1
In-Reply-To: <1346148027-24468-1-git-send-email-wency@cn.fujitsu.com>
References: <1346148027-24468-1-git-send-email-wency@cn.fujitsu.com>

From: Yasuaki Ishimatsu

Before a sparse-vmemmap memmap region that was allocated from bootmem can be
removed, its pages must have been registered with get_page_bootmem().  This
patch therefore walks the virtual mapping of the memmap and registers each
backing page (and the page-table pages that map it) with get_page_bootmem().

Note: register_page_bootmem_memmap() is not implemented for ia64, ppc, s390
and sparc; these architectures only get an empty stub for now.
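For reviewers, a minimal sketch of the walk the x86 implementation performs is
shown below.  It is illustrative only and not part of the patch: it covers just
the !cpu_has_pse case, where the memmap is mapped with base pages, drops the
next-address stepping of the real code, and assumes section_nr, start_page and
size from the surrounding function:

	/*
	 * Illustrative sketch only: register every page backing the
	 * section's memmap, walking the kernel page tables one PAGE_SIZE
	 * step at a time.  Page-table pages are tagged MIX_SECTION_INFO
	 * (they may not belong exclusively to this section); the leaf
	 * pages holding struct page are tagged SECTION_INFO.
	 */
	for (addr = (unsigned long)start_page;
	     addr < (unsigned long)(start_page + size); addr += PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(addr);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			continue;
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		pud = pud_offset(pgd, addr);
		if (pud_none(*pud))
			continue;
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			continue;
		get_page_bootmem(section_nr, pmd_page(*pmd), MIX_SECTION_INFO);

		pte = pte_offset_kernel(pmd, addr);
		if (pte_none(*pte))
			continue;
		get_page_bootmem(section_nr, pte_page(*pte), SECTION_INFO);
	}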
CC: David Rientjes
CC: Jiang Liu
CC: Len Brown
CC: Benjamin Herrenschmidt
CC: Paul Mackerras
CC: Christoph Lameter
Cc: Minchan Kim
CC: Andrew Morton
CC: KOSAKI Motohiro
Signed-off-by: Yasuaki Ishimatsu
Signed-off-by: Wen Congyang
---
 arch/ia64/mm/discontig.c       |    6 ++++
 arch/powerpc/mm/init_64.c      |    6 ++++
 arch/s390/mm/vmem.c            |    6 ++++
 arch/sparc/mm/init_64.c        |    6 ++++
 arch/x86/mm/init_64.c          |   52 ++++++++++++++++++++++++++++++++++++++++
 include/linux/memory_hotplug.h |    2 +
 include/linux/mm.h             |    3 +-
 mm/memory_hotplug.c            |   31 +++++++++++++++++++++--
 8 files changed, 108 insertions(+), 4 deletions(-)

diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index c641333..33943db 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -822,4 +822,10 @@ int __meminit vmemmap_populate(struct page *start_page,
 {
 	return vmemmap_populate_basepages(start_page, size, node);
 }
+
+void register_page_bootmem_memmap(unsigned long section_nr,
+				  struct page *start_page, unsigned long size)
+{
+	/* TODO */
+}
 #endif

diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 620b7ac..3690c44 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -298,5 +298,11 @@ int __meminit vmemmap_populate(struct page *start_page,
 
 	return 0;
 }
+
+void register_page_bootmem_memmap(unsigned long section_nr,
+				  struct page *start_page, unsigned long size)
+{
+	/* TODO */
+}
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */

diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 6f896e7..eda55cd 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -227,6 +227,12 @@ out:
 	return ret;
 }
 
+void register_page_bootmem_memmap(unsigned long section_nr,
+				  struct page *start_page, unsigned long size)
+{
+	/* TODO */
+}
+
 /*
  * Add memory segment to the segment list if it doesn't overlap with
  * an already present segment.
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index d58edf5..add1cc7 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2077,6 +2077,12 @@ void __meminit vmemmap_populate_print_last(void)
 		node_start = 0;
 	}
 }
+
+void register_page_bootmem_memmap(unsigned long section_nr,
+				  struct page *start_page, unsigned long size)
+{
+	/* TODO */
+}
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 static void prot_init_common(unsigned long page_none,

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e0d88ba..0075592 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1138,6 +1138,58 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
 
 	return 0;
 }
+void register_page_bootmem_memmap(unsigned long section_nr,
+				  struct page *start_page, unsigned long size)
+{
+	unsigned long addr = (unsigned long)start_page;
+	unsigned long end = (unsigned long)(start_page + size);
+	unsigned long next;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	for (; addr < end; addr = next) {
+		pte_t *pte = NULL;
+
+		pgd = pgd_offset_k(addr);
+		if (pgd_none(*pgd)) {
+			next = (addr + PAGE_SIZE) & PAGE_MASK;
+			continue;
+		}
+		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);
+
+		pud = pud_offset(pgd, addr);
+		if (pud_none(*pud)) {
+			next = (addr + PAGE_SIZE) & PAGE_MASK;
+			continue;
+		}
+		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
+
+		if (!cpu_has_pse) {
+			next = (addr + PAGE_SIZE) & PAGE_MASK;
+			pmd = pmd_offset(pud, addr);
+			if (pmd_none(*pmd))
+				continue;
+			get_page_bootmem(section_nr, pmd_page(*pmd),
+					 MIX_SECTION_INFO);
+
+			pte = pte_offset_kernel(pmd, addr);
+			if (pte_none(*pte))
+				continue;
+			get_page_bootmem(section_nr, pte_page(*pte),
+					 SECTION_INFO);
+		} else {
+			next = pmd_addr_end(addr, end);
+
+			pmd = pmd_offset(pud, addr);
+			if (pmd_none(*pmd))
+				continue;
+			get_page_bootmem(section_nr, pmd_page(*pmd),
+					 SECTION_INFO);
+		}
+	}
+}
+
 void __meminit vmemmap_populate_print_last(void)
 {
 	if (p_start) {

diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 1133e63..2d18235 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -164,6 +164,8 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
 extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
 extern void put_page_bootmem(struct page *page);
+extern void get_page_bootmem(unsigned long info, struct page *page,
+			     unsigned long type);
 
 /*
  * Lock for memory hotplug guarantees 1) all callbacks for memory hotplug

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 311be90..c607913 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1618,7 +1618,8 @@ int vmemmap_populate_basepages(struct page *start_page,
 						unsigned long pages, int node);
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
-
+void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
+				  unsigned long size);
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 3ca66bc..d8495ff 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -91,8 +91,8 @@ static void release_memory_resource(struct resource *res)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
-static void get_page_bootmem(unsigned long info, struct page *page,
-			     unsigned long type)
+void get_page_bootmem(unsigned long info, struct page *page,
+		      unsigned long type)
 {
 	unsigned long page_type;
 
@@ -164,8 +164,33 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
 }
 
 #else
-static inline void register_page_bootmem_info_section(unsigned long start_pfn)
+static void register_page_bootmem_info_section(unsigned long start_pfn)
 {
+	unsigned long *usemap, mapsize, section_nr, i;
+	struct mem_section *ms;
+	struct page *page, *memmap;
+
+	if (!pfn_valid(start_pfn))
+		return;
+
+	section_nr = pfn_to_section_nr(start_pfn);
+	ms = __nr_to_section(section_nr);
+
+	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+
+	page = virt_to_page(memmap);
+	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
+	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
+
+	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
+
+	usemap = __nr_to_section(section_nr)->pageblock_flags;
+	page = virt_to_page(usemap);
+
+	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
+
+	for (i = 0; i < mapsize; i++, page++)
+		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
 }
 #endif
-- 
1.7.1
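
As context for how the new #else variant above gets invoked: the per-section
registration is driven from register_page_bootmem_info_node().  A minimal
sketch of such a driver loop is shown below; it assumes one call per present
section in the node's pfn range, and the existing loop in mm/memory_hotplug.c
(unchanged by this patch) may differ in detail:

	/*
	 * Illustrative sketch only (not part of this patch): call
	 * register_page_bootmem_info_section() once per memory section of
	 * a node, so the SPARSEMEM and SPARSEMEM_VMEMMAP variants can tag
	 * their bootmem-allocated memmap and usemap pages.
	 */
	unsigned long pfn = pgdat->node_start_pfn;
	unsigned long end_pfn = pfn + pgdat->node_spanned_pages;

	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (pfn_valid(pfn))
			register_page_bootmem_info_section(pfn);
	}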