From mboxrd@z Thu Jan  1 00:00:00 1970
From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCHv7 14/19] x86/mm: Fold p4d page table layer at runtime
Date: Mon, 18 Sep 2017 13:55:48 +0300
Message-Id: <20170918105553.27914-15-kirill.shutemov@linux.intel.com>
In-Reply-To: <20170918105553.27914-1-kirill.shutemov@linux.intel.com>
References: <20170918105553.27914-1-kirill.shutemov@linux.intel.com>
Sender: owner-linux-mm@kvack.org
List-ID: <linux-mm.kvack.org>
To: Ingo Molnar, Linus Torvalds, x86@kernel.org, Thomas Gleixner,
	"H. Peter Anvin"
Cc: Andrew Morton, Andy Lutomirski, Cyrill Gorcunov, Borislav Petkov,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>

This patch changes page table helpers to fold p4d at runtime.
The logic is the same as in <asm-generic/pgtable-nop4d.h>.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 arch/x86/include/asm/paravirt.h | 10 ++++++----
 arch/x86/include/asm/pgalloc.h  |  5 ++++-
 arch/x86/include/asm/pgtable.h  | 10 +++++++++-
 3 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 12deec722cf0..757fdac42a4a 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -569,14 +569,16 @@ static inline p4dval_t p4d_val(p4d_t p4d)
 
 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-	pgdval_t val = native_pgd_val(pgd);
-
-	PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, val);
+	if (pgtable_l5_enabled)
+		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, native_pgd_val(pgd));
+	else
+		set_p4d((p4d_t *)(pgdp), (p4d_t) { pgd.pgd });
 }
 
 static inline void pgd_clear(pgd_t *pgdp)
 {
-	set_pgd(pgdp, __pgd(0));
+	if (pgtable_l5_enabled)
+		set_pgd(pgdp, __pgd(0));
 }
 
 #endif	/* CONFIG_PGTABLE_LEVELS == 5 */
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index b2d0cd8288aa..1ce12afcfb04 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -155,6 +155,8 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #if CONFIG_PGTABLE_LEVELS > 4
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
 {
+	if (!pgtable_l5_enabled)
+		return;
 	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
 	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
 }
@@ -179,7 +181,8 @@ extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);
 static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
 				  unsigned long address)
 {
-	___p4d_free_tlb(tlb, p4d);
+	if (pgtable_l5_enabled)
+		___p4d_free_tlb(tlb, p4d);
 }
 
 #endif	/* CONFIG_PGTABLE_LEVELS > 4 */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index b714934512b3..51f37df28b7a 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -63,7 +63,7 @@ extern pmdval_t early_pmd_flags;
 
 #ifndef __PAGETABLE_P4D_FOLDED
 #define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
-#define pgd_clear(pgd)			native_pgd_clear(pgd)
+#define pgd_clear(pgd)			(pgtable_l5_enabled ? native_pgd_clear(pgd) : 0)
 #endif
 
 #ifndef set_p4d
@@ -857,6 +857,8 @@ static inline unsigned long p4d_index(unsigned long address)
 #if CONFIG_PGTABLE_LEVELS > 4
 static inline int pgd_present(pgd_t pgd)
 {
+	if (!pgtable_l5_enabled)
+		return 1;
 	return pgd_flags(pgd) & _PAGE_PRESENT;
 }
 
@@ -874,16 +876,22 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
 /* to find an entry in a page-table-directory. */
 static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
 {
+	if (!pgtable_l5_enabled)
+		return (p4d_t *)pgd;
 	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
 }
 
 static inline int pgd_bad(pgd_t pgd)
 {
+	if (!pgtable_l5_enabled)
+		return 0;
 	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
 }
 
 static inline int pgd_none(pgd_t pgd)
 {
+	if (!pgtable_l5_enabled)
+		return 0;
 	/*
 	 * There is no need to do a workaround for the KNL stray
 	 * A/D bit erratum here. PGDs only point to page tables
-- 
2.14.1

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
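
For readers following the series from outside the kernel tree, below is a
minimal stand-alone sketch of the runtime-folding idiom the patch applies.
The types, the l5_enabled flag, and the hard-coded shift/mask values are
simplified stand-ins for the kernel's pgd_t, p4d_t, pgtable_l5_enabled and
the real helpers; this is an illustration, not kernel API:

/*
 * Stand-alone sketch of runtime p4d folding (simplified model, not
 * kernel code). With 5-level paging disabled, the p4d level collapses
 * into the pgd: p4d_offset() reinterprets the pgd entry as a p4d
 * entry, and pgd-level predicates report "present and sane" so that
 * generic page-table walkers fall straight through to the p4d level.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long p4d; } p4d_t;

static bool l5_enabled;	/* stand-in for the kernel's pgtable_l5_enabled */

static p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (!l5_enabled)
		return (p4d_t *)pgd;	/* folded: pgd entry doubles as p4d entry */
	/* 5-level case, simplified: index the table the entry points to */
	return (p4d_t *)(pgd->pgd & ~0xfffUL) + ((address >> 39) & 0x1ff);
}

static int pgd_present(pgd_t pgd)
{
	if (!l5_enabled)
		return 1;	/* a folded level can never be "not present" */
	return pgd.pgd & 1;	/* bit 0 models _PAGE_PRESENT */
}

int main(void)
{
	pgd_t entry = { .pgd = 0 };	/* an empty top-level entry */

	l5_enabled = false;	/* 4-level hardware: p4d is folded */
	printf("p4d entry aliases the pgd entry: %d\n",
	       (void *)p4d_offset(&entry, 0) == (void *)&entry);
	printf("pgd_present() despite empty entry: %d\n", pgd_present(entry));
	return 0;
}

The point of checking the flag at runtime rather than with #ifdef, as the
cover letter of the series explains, is that a single kernel image can boot
on both 4- and 5-level paging hardware: when the level is folded, every
pgd-level helper degenerates to a pass-through onto the p4d level.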