From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Linus Torvalds <torvalds@linux-foundation.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	x86@kernel.org, Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Arnd Bergmann <arnd@arndb.de>,
	"H. Peter Anvin" <hpa@zytor.com>
Cc: Andi Kleen <ak@linux.intel.com>,
	Dave Hansen <dave.hansen@intel.com>,
	Andy Lutomirski <luto@amacapital.net>,
	linux-arch@vger.kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCHv2 27/29] x86/mm: add support for 5-level paging for KASLR
Date: Tue, 27 Dec 2016 04:54:11 +0300	[thread overview]
Message-ID: <20161227015413.187403-28-kirill.shutemov@linux.intel.com> (raw)
In-Reply-To: <20161227015413.187403-1-kirill.shutemov@linux.intel.com>

With 5-level paging, randomization happens at the P4D level instead of the PUD level.

The maximum amount of physical memory is also bumped to 52 bits for 5-level paging.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 arch/x86/mm/kaslr.c | 82 ++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 63 insertions(+), 19 deletions(-)
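
Note (not part of the patch): a minimal userspace sketch of the arithmetic behind
the kaslr_regions[] change below. The shift values are assumptions taken from the
existing x86 headers (TB_SHIFT = 40, __PHYSICAL_MASK_SHIFT = 46 with 4-level
paging, 52 with 5-level paging), so treat the numbers as illustrative only.

/*
 * Standalone illustration, not kernel code: size in TB of the physical
 * mapping region, computed the same way as the new kaslr_regions[] entry.
 */
#include <stdio.h>

#define TB_SHIFT 40

static unsigned long max_size_tb(int physical_mask_shift)
{
	return 1UL << (physical_mask_shift - TB_SHIFT);
}

int main(void)
{
	/* 4-level: 46-bit physical addresses -> 64 TB, the old hardcoded value */
	printf("4-level: %lu TB\n", max_size_tb(46));
	/* 5-level: 52-bit physical addresses -> 4096 TB */
	printf("5-level: %lu TB\n", max_size_tb(52));
	return 0;
}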

diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 887e57182716..662e5c4b21c8 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -6,12 +6,12 @@
  *
  * Entropy is generated using the KASLR early boot functions now shared in
  * the lib directory (originally written by Kees Cook). Randomization is
- * done on PGD & PUD page table levels to increase possible addresses. The
- * physical memory mapping code was adapted to support PUD level virtual
- * addresses. This implementation on the best configuration provides 30,000
- * possible virtual addresses in average for each memory region. An additional
- * low memory page is used to ensure each CPU can start with a PGD aligned
- * virtual address (for realmode).
+ * done on PGD & P4D/PUD page table levels to increase possible addresses.
+ * The physical memory mapping code was adapted to support P4D/PUD level
+ * virtual addresses. This implementation on the best configuration provides
+ * 30,000 possible virtual addresses in average for each memory region.
+ * An additional low memory page is used to ensure each CPU can start with
+ * a PGD aligned virtual address (for realmode).
  *
  * The order of each memory region is not changed. The feature looks at
  * the available space for the regions based on different configuration
@@ -70,7 +70,8 @@ static __initdata struct kaslr_memory_region {
 	unsigned long *base;
 	unsigned long size_tb;
 } kaslr_regions[] = {
-	{ &page_offset_base, 64/* Maximum */ },
+	{ &page_offset_base,
+		1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) /* Maximum */ },
 	{ &vmalloc_base, VMALLOC_SIZE_TB },
 	{ &vmemmap_base, 1 },
 };
@@ -142,7 +143,10 @@ void __init kernel_randomize_memory(void)
 		 */
 		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
 		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
-		entropy = (rand % (entropy + 1)) & PUD_MASK;
+		if (IS_ENABLED(CONFIG_X86_5LEVEL))
+			entropy = (rand % (entropy + 1)) & P4D_MASK;
+		else
+			entropy = (rand % (entropy + 1)) & PUD_MASK;
 		vaddr += entropy;
 		*kaslr_regions[i].base = vaddr;
 
@@ -151,27 +155,21 @@ void __init kernel_randomize_memory(void)
 		 * randomization alignment.
 		 */
 		vaddr += get_padding(&kaslr_regions[i]);
-		vaddr = round_up(vaddr + 1, PUD_SIZE);
+		if (IS_ENABLED(CONFIG_X86_5LEVEL))
+			vaddr = round_up(vaddr + 1, P4D_SIZE);
+		else
+			vaddr = round_up(vaddr + 1, PUD_SIZE);
 		remain_entropy -= entropy;
 	}
 }
 
-/*
- * Create PGD aligned trampoline table to allow real mode initialization
- * of additional CPUs. Consume only 1 low memory page.
- */
-void __meminit init_trampoline(void)
+static void __meminit init_trampoline_pud(void)
 {
 	unsigned long paddr, paddr_next;
 	pgd_t *pgd;
 	pud_t *pud_page, *pud_page_tramp;
 	int i;
 
-	if (!kaslr_memory_enabled()) {
-		init_trampoline_default();
-		return;
-	}
-
 	pud_page_tramp = alloc_low_page();
 
 	paddr = 0;
@@ -192,3 +190,49 @@ void __meminit init_trampoline(void)
 	set_pgd(&trampoline_pgd_entry,
 		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
 }
+
+static void __meminit init_trampoline_p4d(void)
+{
+	unsigned long paddr, paddr_next;
+	pgd_t *pgd;
+	p4d_t *p4d_page, *p4d_page_tramp;
+	int i;
+
+	p4d_page_tramp = alloc_low_page();
+
+	paddr = 0;
+	pgd = pgd_offset_k((unsigned long)__va(paddr));
+	p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);
+
+	for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
+		p4d_t *p4d, *p4d_tramp;
+		unsigned long vaddr = (unsigned long)__va(paddr);
+
+		p4d_tramp = p4d_page_tramp + p4d_index(paddr);
+		p4d = p4d_page + p4d_index(vaddr);
+		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
+
+		*p4d_tramp = *p4d;
+	}
+
+	set_pgd(&trampoline_pgd_entry,
+		__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
+}
+
+/*
+ * Create PGD aligned trampoline table to allow real mode initialization
+ * of additional CPUs. Consume only 1 low memory page.
+ */
+void __meminit init_trampoline(void)
+{
+
+	if (!kaslr_memory_enabled()) {
+		init_trampoline_default();
+		return;
+	}
+
+	if (IS_ENABLED(CONFIG_X86_5LEVEL))
+		init_trampoline_p4d();
+	else
+		init_trampoline_pud();
+}
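
Note (not part of the patch): a minimal userspace sketch of the entropy alignment
used above. The shift values (PUD_SHIFT = 30, P4D_SHIFT = 39) are assumptions
based on the x86 page table layout; the point is that masking with P4D_MASK
rounds the random offset down to a 512 GB boundary where PUD_MASK used a 1 GB
boundary.

/*
 * Standalone illustration, not kernel code: how the random offset is
 * rounded down to the page table level used for randomization.
 */
#include <stdio.h>

#define PUD_SHIFT	30
#define P4D_SHIFT	39
#define PUD_MASK	(~((1UL << PUD_SHIFT) - 1))
#define P4D_MASK	(~((1UL << P4D_SHIFT) - 1))

int main(void)
{
	unsigned long rand = 0x123456789abUL;	/* stand-in for prandom output */
	unsigned long entropy = 1UL << 45;	/* stand-in for the available range */
	unsigned long off = rand % (entropy + 1);

	/* 4-level: keep the offset 1 GB (PUD) aligned */
	printf("PUD aligned: %#lx\n", off & PUD_MASK);
	/* 5-level: keep it 512 GB (P4D) aligned instead */
	printf("P4D aligned: %#lx\n", off & P4D_MASK);
	return 0;
}
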
-- 
2.11.0


Thread overview: 72+ messages
2016-12-27  1:53 [PATCHv2 00/29] 5-level paging Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 01/29] x86/cpufeature: Add 5-level paging detection Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 02/29] asm-generic: introduce 5level-fixup.h Kirill A. Shutemov
2017-01-27 11:06   ` Vlastimil Babka
2017-01-27 11:30     ` Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 03/29] asm-generic: introduce __ARCH_USE_5LEVEL_HACK Kirill A. Shutemov
2017-01-27 13:24   ` Vlastimil Babka
2017-01-27 13:55     ` Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 04/29] arch, mm: convert all architectures to use 5level-fixup.h Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 05/29] asm-generic: introduce <asm-generic/pgtable-nop4d.h> Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 06/29] mm: convert generic code to 5-level paging Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 07/29] mm: introduce __p4d_alloc() Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 08/29] x86: basic changes into headers for 5-level paging Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 09/29] x86: trivial portion of 5-level paging conversion Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 10/29] x86/gup: add 5-level paging support Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 11/29] x86/ident_map: " Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 12/29] x86/mm: add support of p4d_t in vmalloc_fault() Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 13/29] x86/power: support p4d_t in hibernate code Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 14/29] x86/kexec: support p4d_t Kirill A. Shutemov
2016-12-27  1:53 ` [PATCHv2 15/29] x86: convert the rest of the code to " Kirill A. Shutemov
2016-12-27  1:54 ` [PATCHv2 16/29] x86: detect 5-level paging support Kirill A. Shutemov
2016-12-27  1:54 ` [PATCHv2 17/29] x86/asm: remove __VIRTUAL_MASK_SHIFT==47 assert Kirill A. Shutemov
2016-12-27  1:54 ` [PATCHv2 18/29] x86/mm: define virtual memory map for 5-level paging Kirill A. Shutemov
2016-12-27  1:54 ` [PATCHv2 19/29] x86/paravirt: make paravirt code support " Kirill A. Shutemov
2016-12-27  1:54 ` [PATCHv2 20/29] x86/mm: basic defines/helpers for CONFIG_X86_5LEVEL Kirill A. Shutemov
2016-12-27  1:54 ` [PATCHv2 21/29] x86/dump_pagetables: support 5-level paging Kirill A. Shutemov
2016-12-27  1:54 ` [PATCHv2 22/29] x86/mm: extend kasan to " Kirill A. Shutemov
2016-12-27  1:54 ` [PATCHv2 23/29] x86/espfix: " Kirill A. Shutemov
2016-12-27  1:54 ` [PATCHv2 24/29] x86/mm: add support of additional page table level during early boot Kirill A. Shutemov
2016-12-27  1:54 ` [PATCHv2 25/29] x86/mm: add sync_global_pgds() for configuration with 5-level paging Kirill A. Shutemov
2016-12-27  1:54 ` [PATCHv2 26/29] x86/mm: make kernel_physical_mapping_init() support " Kirill A. Shutemov
2016-12-27  1:54 ` Kirill A. Shutemov [this message]
2016-12-27  1:54 ` [PATCHv2 28/29] x86: enable 5-level paging support Kirill A. Shutemov
2016-12-27  1:54 ` [RFC, PATCHv2 29/29] mm, x86: introduce RLIMIT_VADDR Kirill A. Shutemov
2016-12-27  2:06   ` Andy Lutomirski
2016-12-27  2:24     ` Kirill A. Shutemov
2016-12-27  3:22       ` Andy Lutomirski
2017-01-02  9:09         ` Kirill A. Shutemov
2016-12-29  2:53       ` Carlos O'Donell
2016-12-31  2:08         ` Andy Lutomirski
2017-01-02  8:35           ` Kirill A. Shutemov
2017-01-13 20:11             ` H.J. Lu
2017-01-02  8:44   ` Arnd Bergmann
2017-01-03  6:08     ` Andy Lutomirski
2017-01-03 13:18       ` Arnd Bergmann
2017-01-03 18:29         ` Andy Lutomirski
2017-01-03 22:07           ` Arnd Bergmann
2017-01-03 22:09             ` Andy Lutomirski
2017-01-04 13:55               ` Arnd Bergmann
2017-01-03 16:04       ` Kirill A. Shutemov
2017-01-03 18:27         ` Andy Lutomirski
2017-01-04 14:19           ` Kirill A. Shutemov
2017-01-05 17:53             ` Andy Lutomirski
2017-01-05 19:13   ` Dave Hansen
2017-01-05 19:29     ` Kirill A. Shutemov
2017-01-05 19:39       ` Dave Hansen
2017-01-05 20:11         ` Kirill A. Shutemov
2017-01-05 20:14         ` Andy Lutomirski
2017-01-05 20:49           ` Dave Hansen
2017-01-05 21:27             ` Andy Lutomirski
2017-01-05 23:17               ` Dave Hansen
2017-01-11 14:29             ` Kirill A. Shutemov
2017-01-11 18:09               ` Andy Lutomirski
2017-01-11 18:37                 ` Kirill A. Shutemov
2017-01-11 18:49                   ` Dave Hansen
2017-01-11 19:20                     ` Andy Lutomirski
2017-01-11 19:31                       ` Linus Torvalds
2017-01-11 21:46                         ` Andi Kleen
2017-01-11 19:32                       ` Kirill A. Shutemov
2017-01-11 19:39                         ` Linus Torvalds
2017-01-11 18:26               ` Dave Hansen
2017-01-05 16:57 ` [PATCHv2 00/29] 5-level paging Kirill A. Shutemov
