From: Kees Cook <keescook@chromium.org>
To: Ingo Molnar <mingo@kernel.org>
Cc: Kees Cook <keescook@chromium.org>,
	Yinghai Lu <yinghai@kernel.org>, Baoquan He <bhe@redhat.com>,
	Ard Biesheuvel <ard.biesheuvel@linaro.org>,
	Matt Redfearn <matt.redfearn@imgtec.com>,
	x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	Vivek Goyal <vgoyal@redhat.com>,
	Andy Lutomirski <luto@kernel.org>,
	lasse.collin@tukaani.org,
	Andrew Morton <akpm@linux-foundation.org>,
	Dave Young <dyoung@redhat.com>,
	kernel-hardening@lists.openwall.com,
	LKML <linux-kernel@vger.kernel.org>
Subject: [PATCH v5 11/21] x86, boot: Split out kernel_ident_mapping_init
Date: Thu, 14 Apr 2016 15:29:04 -0700
Message-ID: <1460672954-32567-12-git-send-email-keescook@chromium.org>
In-Reply-To: <1460672954-32567-1-git-send-email-keescook@chromium.org>

From: Yinghai Lu <yinghai@kernel.org>

In order to support on-demand page table creation when moving the
kernel for KASLR, we need to use kernel_ident_mapping_init() in the
decompression code. Split it out into its own file so it can be used
outside of init_64.c. Additionally, wrap the __pa/__va defines in
#ifndef guards, since the decompression code needs to provide its own
definitions.

Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
[kees: rewrote changelog]
Signed-off-by: Kees Cook <keescook@chromium.org>
---
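
For context, here is a minimal sketch (not part of this patch) of how
code outside init_64.c can take advantage of the new #ifndef guards:
define __pa/__va before the kernel headers are pulled in, then include
ident_map.c directly, the same way init_64.c now does. In the
identity-mapped decompressor, virtual and physical addresses coincide,
so the overrides can be trivial. The exact definitions and include path
below are illustrative; the real wiring for the decompressor lands in a
later patch in this series.

	/*
	 * Illustrative only: the decompressor runs with an identity
	 * mapping, so a virtual address *is* its physical address.
	 * Defining these before any include of asm/page.h makes the
	 * #ifndef guards added by this patch skip the kernel versions.
	 */
	#define __pa(x)	((unsigned long)(x))
	#define __va(x)	((void *)((unsigned long)(x)))

	/* Reuse the shared identity-mapping code verbatim. */
	#include "../../mm/ident_map.c"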
 arch/x86/include/asm/page.h |  5 +++
 arch/x86/mm/ident_map.c     | 74 +++++++++++++++++++++++++++++++++++++++++++++
 arch/x86/mm/init_64.c       | 74 +--------------------------------------------
 3 files changed, 80 insertions(+), 73 deletions(-)
 create mode 100644 arch/x86/mm/ident_map.c

diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 802dde30c928..cf8f619b305f 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -37,7 +37,10 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
+#ifndef __pa
 #define __pa(x)		__phys_addr((unsigned long)(x))
+#endif
+
 #define __pa_nodebug(x)	__phys_addr_nodebug((unsigned long)(x))
 /* __pa_symbol should be used for C visible symbols.
    This seems to be the official gcc blessed way to do such arithmetic. */
@@ -51,7 +54,9 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 #define __pa_symbol(x) \
 	__phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
 
+#ifndef __va
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
+#endif
 
 #define __boot_va(x)		__va(x)
 #define __boot_pa(x)		__pa(x)
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
new file mode 100644
index 000000000000..751ca920773a
--- /dev/null
+++ b/arch/x86/mm/ident_map.c
@@ -0,0 +1,74 @@
+
+static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+			   unsigned long addr, unsigned long end)
+{
+	addr &= PMD_MASK;
+	for (; addr < end; addr += PMD_SIZE) {
+		pmd_t *pmd = pmd_page + pmd_index(addr);
+
+		if (!pmd_present(*pmd))
+			set_pmd(pmd, __pmd(addr | pmd_flag));
+	}
+}
+static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
+			  unsigned long addr, unsigned long end)
+{
+	unsigned long next;
+
+	for (; addr < end; addr = next) {
+		pud_t *pud = pud_page + pud_index(addr);
+		pmd_t *pmd;
+
+		next = (addr & PUD_MASK) + PUD_SIZE;
+		if (next > end)
+			next = end;
+
+		if (pud_present(*pud)) {
+			pmd = pmd_offset(pud, 0);
+			ident_pmd_init(info->pmd_flag, pmd, addr, next);
+			continue;
+		}
+		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
+		if (!pmd)
+			return -ENOMEM;
+		ident_pmd_init(info->pmd_flag, pmd, addr, next);
+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+	}
+
+	return 0;
+}
+
+int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
+			      unsigned long addr, unsigned long end)
+{
+	unsigned long next;
+	int result;
+	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
+
+	for (; addr < end; addr = next) {
+		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+		pud_t *pud;
+
+		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
+		if (next > end)
+			next = end;
+
+		if (pgd_present(*pgd)) {
+			pud = pud_offset(pgd, 0);
+			result = ident_pud_init(info, pud, addr, next);
+			if (result)
+				return result;
+			continue;
+		}
+
+		pud = (pud_t *)info->alloc_pgt_page(info->context);
+		if (!pud)
+			return -ENOMEM;
+		result = ident_pud_init(info, pud, addr, next);
+		if (result)
+			return result;
+		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
+	}
+
+	return 0;
+}
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 214afda97911..65cfbeefbec4 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -58,79 +58,7 @@
 
 #include "mm_internal.h"
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
-			   unsigned long addr, unsigned long end)
-{
-	addr &= PMD_MASK;
-	for (; addr < end; addr += PMD_SIZE) {
-		pmd_t *pmd = pmd_page + pmd_index(addr);
-
-		if (!pmd_present(*pmd))
-			set_pmd(pmd, __pmd(addr | pmd_flag));
-	}
-}
-static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
-			  unsigned long addr, unsigned long end)
-{
-	unsigned long next;
-
-	for (; addr < end; addr = next) {
-		pud_t *pud = pud_page + pud_index(addr);
-		pmd_t *pmd;
-
-		next = (addr & PUD_MASK) + PUD_SIZE;
-		if (next > end)
-			next = end;
-
-		if (pud_present(*pud)) {
-			pmd = pmd_offset(pud, 0);
-			ident_pmd_init(info->pmd_flag, pmd, addr, next);
-			continue;
-		}
-		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
-		if (!pmd)
-			return -ENOMEM;
-		ident_pmd_init(info->pmd_flag, pmd, addr, next);
-		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
-	}
-
-	return 0;
-}
-
-int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-			      unsigned long addr, unsigned long end)
-{
-	unsigned long next;
-	int result;
-	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
-
-	for (; addr < end; addr = next) {
-		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
-		pud_t *pud;
-
-		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
-		if (next > end)
-			next = end;
-
-		if (pgd_present(*pgd)) {
-			pud = pud_offset(pgd, 0);
-			result = ident_pud_init(info, pud, addr, next);
-			if (result)
-				return result;
-			continue;
-		}
-
-		pud = (pud_t *)info->alloc_pgt_page(info->context);
-		if (!pud)
-			return -ENOMEM;
-		result = ident_pud_init(info, pud, addr, next);
-		if (result)
-			return result;
-		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
-	}
-
-	return 0;
-}
+#include "ident_map.c"
 
 /*
  * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
-- 
2.6.3
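
For readers unfamiliar with the interface: kernel_ident_mapping_init()
is driven entirely by the struct x86_mapping_info the caller passes in,
notably the alloc_pgt_page() callback that hands back memory for new
page tables and the pmd_flag applied to every 2M mapping. The sketch
below shows a hypothetical caller that identity-maps a physical range
using pages carved out of a preallocated buffer; the struct
alloc_pgt_data type, its fields, and map_range() are made up for
illustration and do not appear in this patch.

	/* Hypothetical pool of preallocated, zeroed page-table pages. */
	struct alloc_pgt_data {
		unsigned char *pgt_buf;
		unsigned long pgt_buf_size;
		unsigned long pgt_buf_offset;
	};

	/* alloc_pgt_page() callback: carve the next page out of the pool. */
	static void *alloc_pgt_page(void *context)
	{
		struct alloc_pgt_data *pages = context;
		void *entry = NULL;

		if (pages->pgt_buf_offset + PAGE_SIZE <= pages->pgt_buf_size) {
			entry = pages->pgt_buf + pages->pgt_buf_offset;
			pages->pgt_buf_offset += PAGE_SIZE;
		}
		return entry;
	}

	/* Identity-map [start, end) with 2M pages; returns 0 or -ENOMEM. */
	static int map_range(struct alloc_pgt_data *pages, pgd_t *pgd,
			     unsigned long start, unsigned long end)
	{
		struct x86_mapping_info info = {
			.alloc_pgt_page	= alloc_pgt_page,
			.context	= pages,
			.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
			.kernel_mapping	= false,  /* identity, not __PAGE_OFFSET based */
		};

		return kernel_ident_mapping_init(&info, pgd, start, end);
	}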
