From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Linus Torvalds <torvalds@linux-foundation.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	x86@kernel.org, Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, "H. Peter Anvin" <hpa@zytor.com>
Cc: Andi Kleen <ak@linux.intel.com>,
	Dave Hansen <dave.hansen@intel.com>,
	Andy Lutomirski <luto@amacapital.net>,
	linux-arch@vger.kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCH 6/8] x86/mm: Add support for 5-level paging for KASLR
Date: Thu,  6 Apr 2017 17:01:04 +0300	[thread overview]
Message-ID: <20170406140106.78087-7-kirill.shutemov@linux.intel.com> (raw)
In-Reply-To: <20170406140106.78087-1-kirill.shutemov@linux.intel.com>

With 5-level paging, randomization happens at the P4D level instead of the
PUD level.

The maximum amount of supported physical memory is also bumped to 52 bits
for 5-level paging.
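
For illustration (not part of the patch): a minimal user-space sketch of the
arithmetic behind the new kaslr_regions[] maximum for the direct mapping.
The constants are assumptions taken from the x86 headers of that era
(TB_SHIFT == 40, __PHYSICAL_MASK_SHIFT == 46 for 4-level and 52 for 5-level
paging); the hard-coded 64 TB that the patch removes corresponds to the
46-bit case.

  /* Illustrative sketch with assumed constants; not kernel code. */
  #include <stdio.h>

  #define TB_SHIFT        40
  #define PHYS_SHIFT_4LVL 46  /* assumed __PHYSICAL_MASK_SHIFT, 4-level */
  #define PHYS_SHIFT_5LVL 52  /* assumed __PHYSICAL_MASK_SHIFT, 5-level */

  int main(void)
  {
          /* 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT), in terabytes */
          printf("4-level max: %llu TB\n",
                 1ULL << (PHYS_SHIFT_4LVL - TB_SHIFT));   /*   64 TB */
          printf("5-level max: %llu TB\n",
                 1ULL << (PHYS_SHIFT_5LVL - TB_SHIFT));   /* 4096 TB */
          return 0;
  }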

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 arch/x86/mm/kaslr.c | 81 ++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 62 insertions(+), 19 deletions(-)

diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index aed206475aa7..af599167fe3c 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -6,12 +6,12 @@
  *
  * Entropy is generated using the KASLR early boot functions now shared in
  * the lib directory (originally written by Kees Cook). Randomization is
- * done on PGD & PUD page table levels to increase possible addresses. The
- * physical memory mapping code was adapted to support PUD level virtual
- * addresses. This implementation on the best configuration provides 30,000
- * possible virtual addresses in average for each memory region. An additional
- * low memory page is used to ensure each CPU can start with a PGD aligned
- * virtual address (for realmode).
+ * done on PGD & P4D/PUD page table levels to increase possible addresses.
+ * The physical memory mapping code was adapted to support P4D/PUD level
+ * virtual addresses. This implementation on the best configuration provides
+ * 30,000 possible virtual addresses in average for each memory region.
+ * An additional low memory page is used to ensure each CPU can start with
+ * a PGD aligned virtual address (for realmode).
  *
  * The order of each memory region is not changed. The feature looks at
  * the available space for the regions based on different configuration
@@ -70,7 +70,7 @@ static __initdata struct kaslr_memory_region {
 	unsigned long *base;
 	unsigned long size_tb;
 } kaslr_regions[] = {
-	{ &page_offset_base, 64/* Maximum */ },
+	{ &page_offset_base, 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) /* Maximum */ },
 	{ &vmalloc_base, VMALLOC_SIZE_TB },
 	{ &vmemmap_base, 1 },
 };
@@ -142,7 +142,10 @@ void __init kernel_randomize_memory(void)
 		 */
 		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
 		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
-		entropy = (rand % (entropy + 1)) & PUD_MASK;
+		if (IS_ENABLED(CONFIG_X86_5LEVEL))
+			entropy = (rand % (entropy + 1)) & P4D_MASK;
+		else
+			entropy = (rand % (entropy + 1)) & PUD_MASK;
 		vaddr += entropy;
 		*kaslr_regions[i].base = vaddr;
 
@@ -151,27 +154,21 @@ void __init kernel_randomize_memory(void)
 		 * randomization alignment.
 		 */
 		vaddr += get_padding(&kaslr_regions[i]);
-		vaddr = round_up(vaddr + 1, PUD_SIZE);
+		if (IS_ENABLED(CONFIG_X86_5LEVEL))
+			vaddr = round_up(vaddr + 1, P4D_SIZE);
+		else
+			vaddr = round_up(vaddr + 1, PUD_SIZE);
 		remain_entropy -= entropy;
 	}
 }
 
-/*
- * Create PGD aligned trampoline table to allow real mode initialization
- * of additional CPUs. Consume only 1 low memory page.
- */
-void __meminit init_trampoline(void)
+static void __meminit init_trampoline_pud(void)
 {
 	unsigned long paddr, paddr_next;
 	pgd_t *pgd;
 	pud_t *pud_page, *pud_page_tramp;
 	int i;
 
-	if (!kaslr_memory_enabled()) {
-		init_trampoline_default();
-		return;
-	}
-
 	pud_page_tramp = alloc_low_page();
 
 	paddr = 0;
@@ -192,3 +189,49 @@ void __meminit init_trampoline(void)
 	set_pgd(&trampoline_pgd_entry,
 		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
 }
+
+static void __meminit init_trampoline_p4d(void)
+{
+	unsigned long paddr, paddr_next;
+	pgd_t *pgd;
+	p4d_t *p4d_page, *p4d_page_tramp;
+	int i;
+
+	p4d_page_tramp = alloc_low_page();
+
+	paddr = 0;
+	pgd = pgd_offset_k((unsigned long)__va(paddr));
+	p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);
+
+	for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
+		p4d_t *p4d, *p4d_tramp;
+		unsigned long vaddr = (unsigned long)__va(paddr);
+
+		p4d_tramp = p4d_page_tramp + p4d_index(paddr);
+		p4d = p4d_page + p4d_index(vaddr);
+		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
+
+		*p4d_tramp = *p4d;
+	}
+
+	set_pgd(&trampoline_pgd_entry,
+		__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
+}
+
+/*
+ * Create PGD aligned trampoline table to allow real mode initialization
+ * of additional CPUs. Consume only 1 low memory page.
+ */
+void __meminit init_trampoline(void)
+{
+
+	if (!kaslr_memory_enabled()) {
+		init_trampoline_default();
+		return;
+	}
+
+	if (IS_ENABLED(CONFIG_X86_5LEVEL))
+		init_trampoline_p4d();
+	else
+		init_trampoline_pud();
+}
-- 
2.11.0
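
As an aside (not part of the patch itself): the practical effect of switching
the entropy mask from PUD_MASK to P4D_MASK above is that the random offset of
each region is rounded down to a 512 GB (P4D) boundary under 5-level paging
instead of a 1 GB (PUD) boundary, and the start of the next region is rounded
up at the same granularity. A minimal stand-alone sketch of that masking, with
assumed shift values (PUD_SHIFT == 30; P4D_SHIFT == 39 when CONFIG_X86_5LEVEL
is set):

  /* Stand-alone sketch with assumed constants; not kernel code. */
  #include <stdint.h>
  #include <stdio.h>

  #define PUD_SHIFT 30                       /* assumed: 1 GB per PUD entry */
  #define P4D_SHIFT 39                       /* assumed: 512 GB per P4D entry */
  #define PUD_SIZE  (1ULL << PUD_SHIFT)
  #define P4D_SIZE  (1ULL << P4D_SHIFT)
  #define PUD_MASK  (~(PUD_SIZE - 1))
  #define P4D_MASK  (~(P4D_SIZE - 1))

  int main(void)
  {
          uint64_t rand = 0x0123456789abcdefULL; /* stand-in for prandom output */
          uint64_t entropy = 1ULL << 45;         /* stand-in for available space */

          /* 4-level: align the random offset to a PUD (1 GB) boundary */
          printf("PUD-aligned offset: %#llx\n",
                 (unsigned long long)((rand % (entropy + 1)) & PUD_MASK));

          /* 5-level: align the random offset to a P4D (512 GB) boundary */
          printf("P4D-aligned offset: %#llx\n",
                 (unsigned long long)((rand % (entropy + 1)) & P4D_MASK));
          return 0;
  }

The same idea drives the trampoline change: init_trampoline() now copies
either PUD or P4D entries covering the direct mapping into a single low page,
so the real-mode trampoline still gets a PGD-aligned view of memory regardless
of the paging mode.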
