From: Muchun Song <songmuchun@bytedance.com>
To: corbet@lwn.net, mike.kravetz@oracle.com, tglx@linutronix.de, mingo@redhat.com,
	bp@alien8.de, x86@kernel.org, hpa@zytor.com, dave.hansen@linux.intel.com,
	luto@kernel.org, peterz@infradead.org, viro@zeniv.linux.org.uk,
	akpm@linux-foundation.org, paulmck@kernel.org, mchehab+huawei@kernel.org,
	pawan.kumar.gupta@linux.intel.com, rdunlap@infradead.org, oneukum@suse.com,
	anshuman.khandual@arm.com, jroedel@suse.de, almasrymina@google.com,
	rientjes@google.com, willy@infradead.org
Cc: duanxiongchun@bytedance.com, linux-doc@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	linux-fsdevel@vger.kernel.org, Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v2 17/19] mm/hugetlb: Merge pte to huge pmd only for gigantic page
Date: Mon, 26 Oct 2020 22:51:12 +0800
Message-ID: <20201026145114.59424-18-songmuchun@bytedance.com> (raw)
In-Reply-To: <20201026145114.59424-1-songmuchun@bytedance.com>

Merge the ptes back into a huge pmd if the pmd has ever been split. For
now this only supports gigantic pages whose vmemmap size is an integer
multiple of PMD_SIZE, which is the simplest case to handle.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 arch/x86/include/asm/hugetlb.h |   8 +++
 include/linux/hugetlb.h        |   7 +++
 mm/hugetlb.c                   | 106 ++++++++++++++++++++++++++++++++-
 3 files changed, 119 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 7c3eb60c2198..9f9e19dd0578 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -15,6 +15,14 @@ static inline bool vmemmap_pmd_huge(pmd_t *pmd)
 {
 	return pmd_large(*pmd);
 }
+
+#define vmemmap_pmd_mkhuge vmemmap_pmd_mkhuge
+static inline pmd_t vmemmap_pmd_mkhuge(struct page *page)
+{
+	pte_t entry = pfn_pte(page_to_pfn(page), PAGE_KERNEL_LARGE);
+
+	return __pmd(pte_val(entry));
+}
 #endif
 
 #define hugepages_supported() boot_cpu_has(X86_FEATURE_PSE)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 695d3041ae7d..3a45199cc5c1 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -611,6 +611,13 @@ static inline bool vmemmap_pmd_huge(pmd_t *pmd)
 }
 #endif
 
+#ifndef vmemmap_pmd_mkhuge
+static inline pmd_t vmemmap_pmd_mkhuge(struct page *page)
+{
+	return pmd_mkhuge(mk_pmd(page, PAGE_KERNEL));
+}
+#endif
+
 #ifndef VMEMMAP_HPAGE_SHIFT
 #define VMEMMAP_HPAGE_SHIFT	PMD_SHIFT
 #endif
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 82467d573fee..a526bcdb137b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1718,6 +1718,62 @@ static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
 	}
 }
 
+static void __replace_huge_page_pte_vmemmap(pte_t *ptep, unsigned long start,
+					    unsigned int nr, struct page *huge,
+					    struct list_head *free_pages)
+{
+	unsigned long addr;
+	unsigned long end = start + (nr << PAGE_SHIFT);
+
+	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
+		struct page *page;
+		pte_t old = *ptep;
+		pte_t entry;
+
+		prepare_vmemmap_page(huge);
+
+		entry = mk_pte(huge++, PAGE_KERNEL);
+		VM_WARN_ON(!pte_present(old));
+		page = pte_page(old);
+		list_add(&page->lru, free_pages);
+
+		set_pte_at(&init_mm, addr, ptep, entry);
+	}
+}
+
+static void replace_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
+					  struct page *huge,
+					  struct list_head *free_pages)
+{
+	unsigned long end = start + VMEMMAP_HPAGE_SIZE;
+
+	flush_cache_vunmap(start, end);
+	__replace_huge_page_pte_vmemmap(pte_offset_kernel(pmd, start), start,
+					VMEMMAP_HPAGE_NR, huge, free_pages);
+	flush_tlb_kernel_range(start, end);
+}
+
+static pte_t *merge_vmemmap_pte(pmd_t *pmdp, unsigned long addr)
+{
+	pte_t *pte;
+	struct page *page;
+
+	pte = pte_offset_kernel(pmdp, addr);
+	page = pte_page(*pte);
+	set_pmd(pmdp, vmemmap_pmd_mkhuge(page));
+
+	return pte;
+}
+
+static void merge_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
+					struct page *huge,
+					struct list_head *free_pages)
+{
+	replace_huge_page_pmd_vmemmap(pmd, start, huge, free_pages);
+	pte_free_kernel(&init_mm, merge_vmemmap_pte(pmd, start));
+	flush_tlb_kernel_range(start, start + VMEMMAP_HPAGE_SIZE);
+}
+
 static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
 {
 	int i;
@@ -1731,6 +1787,15 @@ static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
 	}
 }
 
+static inline void dissolve_compound_page(struct page *page, unsigned int order)
+{
+	int i;
+	unsigned int nr_pages = 1 << order;
+
+	for (i = 1; i < nr_pages; i++)
+		set_page_refcounted(page + i);
+}
+
 static void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 	pmd_t *pmd;
@@ -1750,10 +1815,47 @@ static void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
 					  __remap_huge_page_pte_vmemmap);
 	if (!freed_vmemmap_hpage_dec(pmd_page(*pmd)) && pmd_split(pmd)) {
 		/*
-		 * Todo:
-		 * Merge pte to huge pmd if it has ever been split.
+		 * Merge the ptes back into a huge pmd if it has ever been
+		 * split. For now only gigantic pages whose vmemmap size is
+		 * an integer multiple of PMD_SIZE are supported; this is
+		 * the simplest case to handle.
 		 */
 		clear_pmd_split(pmd);
+
+		if (IS_ALIGNED(nr_vmemmap(h), VMEMMAP_HPAGE_NR)) {
+			unsigned long addr = (unsigned long)head;
+			unsigned long end = addr + nr_vmemmap_size(h);
+
+			spin_unlock(ptl);
+
+			for (; addr < end; addr += VMEMMAP_HPAGE_SIZE) {
+				void *to;
+				struct page *page;
+
+				page = alloc_pages(GFP_VMEMMAP_PAGE & ~__GFP_NOFAIL,
+						   VMEMMAP_HPAGE_ORDER);
+				if (!page)
+					goto out;
+				dissolve_compound_page(page,
+						       VMEMMAP_HPAGE_ORDER);
+
+				to = page_to_virt(page);
+				memcpy(to, (void *)addr, VMEMMAP_HPAGE_SIZE);
+
+				/*
+				 * Make sure that any data written to @to is
+				 * made visible to the physical page.
+				 */
+				flush_kernel_vmap_range(to, VMEMMAP_HPAGE_SIZE);
+
+				merge_huge_page_pmd_vmemmap(pmd++, addr, page,
+							    &remap_pages);
+			}
+
+out:
+			free_vmemmap_page_list(&remap_pages);
+			return;
+		}
 	}
 	spin_unlock(ptl);
 }
-- 
2.20.1
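
The alignment condition the patch relies on, IS_ALIGNED(nr_vmemmap(h), VMEMMAP_HPAGE_NR),
can be illustrated with a small standalone sketch. It is not part of the patch: the
helper name vmemmap_pmd_aligned() and the constants BASE_PAGE_SIZE, STRUCT_PAGE_SIZE
and VMEMMAP_HPAGE_SIZE are made up here and assume x86-64 defaults (4 KiB base pages,
2 MiB PMDs, 64-byte struct page).

	/*
	 * Sketch only: the pmd merge path applies when the struct pages of a
	 * huge page fill a whole number of PMD-sized vmemmap mappings.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define BASE_PAGE_SIZE		(4UL * 1024)		/* 4 KiB */
	#define VMEMMAP_HPAGE_SIZE	(2UL * 1024 * 1024)	/* PMD_SIZE, 2 MiB */
	#define STRUCT_PAGE_SIZE	64UL			/* sizeof(struct page) */

	static bool vmemmap_pmd_aligned(unsigned long huge_page_size)
	{
		unsigned long nr_struct_pages = huge_page_size / BASE_PAGE_SIZE;
		unsigned long vmemmap_size = nr_struct_pages * STRUCT_PAGE_SIZE;

		return vmemmap_size % VMEMMAP_HPAGE_SIZE == 0;
	}

	int main(void)
	{
		/* 1 GiB gigantic page: 262144 struct pages -> 16 MiB of vmemmap,
		 * i.e. exactly 8 PMD mappings, so the merge path applies. */
		printf("1 GiB: %d\n", vmemmap_pmd_aligned(1UL << 30));

		/* 2 MiB hugepage: 512 struct pages -> 32 KiB of vmemmap, not a
		 * multiple of PMD_SIZE, so only the pte-level remap is used. */
		printf("2 MiB: %d\n", vmemmap_pmd_aligned(2UL << 20));

		return 0;
	}

With those assumed defaults, 1 GiB gigantic pages qualify for the merge, while 2 MiB
hugepages keep using the pte-level remap introduced earlier in the series.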