* [PATCH] x86/mm: Enable KASLR for vmemmap memory region (x86_64)
From: Thomas Garnier @ 2016-07-27 15:59 UTC
  To: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Thomas Garnier, Kees Cook
  Cc: x86, linux-kernel, kernel-hardening

Add vmemmap in the list of randomized memory regions.

The vmemmap region holds a representation of the physical memory (through
a struct page array). An attacker could use this region to disclose the
kernel memory layout (walking the page linked list).
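
Concretely, without randomization the sparse-vmemmap layout is fully
predictable: on x86_64, vmemmap is (struct page *)VMEMMAP_START, so the
struct page for any page frame number sits at a fixed, compile-time-known
address. A minimal sketch of that bidirectional mapping (an editor's
illustration assuming the standard sparse-vmemmap model, not part of the
patch):

/*
 * Sketch only, not from the patch: with a constant VMEMMAP_START, both
 * directions of the pfn <-> struct page mapping are predictable on
 * every boot. Assumes the x86_64 sparse-vmemmap model where
 * vmemmap == (struct page *)VMEMMAP_START.
 */
#include <linux/mm.h>

/* Address of the struct page describing physical frame @pfn. */
static struct page *sketch_page_for_pfn(unsigned long pfn)
{
	return (struct page *)VMEMMAP_START + pfn;
}

/* Physical frame described by a leaked struct page pointer. */
static unsigned long sketch_pfn_for_page(const struct page *page)
{
	return page - (const struct page *)VMEMMAP_START;
}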

Signed-off-by: Thomas Garnier <thgarnie@google.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
---
Missing patch that was not picked up by the tip bot for KASLR memory randomization.
Resending after rebasing on tip and re-testing, as discussed with Ingo.
Based on tip 4bcc8cf6ab5932cbb2511c8e18065e61b069f21c
---
 arch/x86/include/asm/kaslr.h            |  1 +
 arch/x86/include/asm/pgtable_64_types.h |  4 +++-
 arch/x86/mm/kaslr.c                     | 24 +++++++++++++++++++++++-
 3 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kaslr.h b/arch/x86/include/asm/kaslr.h
index 2674ee3..1052a79 100644
--- a/arch/x86/include/asm/kaslr.h
+++ b/arch/x86/include/asm/kaslr.h
@@ -6,6 +6,7 @@ unsigned long kaslr_get_random_long(const char *purpose);
 #ifdef CONFIG_RANDOMIZE_MEMORY
 extern unsigned long page_offset_base;
 extern unsigned long vmalloc_base;
+extern unsigned long vmemmap_base;
 
 void kernel_randomize_memory(void);
 #else
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 6fdef9e..3a26420 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -57,11 +57,13 @@ typedef struct { pteval_t pte; } pte_t;
 #define MAXMEM		_AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
 #define VMALLOC_SIZE_TB	_AC(32, UL)
 #define __VMALLOC_BASE	_AC(0xffffc90000000000, UL)
-#define VMEMMAP_START	_AC(0xffffea0000000000, UL)
+#define __VMEMMAP_BASE	_AC(0xffffea0000000000, UL)
 #ifdef CONFIG_RANDOMIZE_MEMORY
 #define VMALLOC_START	vmalloc_base
+#define VMEMMAP_START	vmemmap_base
 #else
 #define VMALLOC_START	__VMALLOC_BASE
+#define VMEMMAP_START	__VMEMMAP_BASE
 #endif /* CONFIG_RANDOMIZE_MEMORY */
 #define VMALLOC_END	(VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
 #define MODULES_VADDR    (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 26dccd6..3e9875f 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -44,13 +44,22 @@
  * ensure that this order is correct and won't be changed.
  */
 static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
-static const unsigned long vaddr_end = VMEMMAP_START;
+
+#if defined(CONFIG_X86_ESPFIX64)
+static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
+#elif defined(CONFIG_EFI)
+static const unsigned long vaddr_end = EFI_VA_START;
+#else
+static const unsigned long vaddr_end = __START_KERNEL_map;
+#endif
 
 /* Default values */
 unsigned long page_offset_base = __PAGE_OFFSET_BASE;
 EXPORT_SYMBOL(page_offset_base);
 unsigned long vmalloc_base = __VMALLOC_BASE;
 EXPORT_SYMBOL(vmalloc_base);
+unsigned long vmemmap_base = __VMEMMAP_BASE;
+EXPORT_SYMBOL(vmemmap_base);
 
 /*
  * Memory regions randomized by KASLR (except modules that use a separate logic
@@ -63,6 +72,7 @@ static __initdata struct kaslr_memory_region {
 } kaslr_regions[] = {
 	{ &page_offset_base, 64/* Maximum */ },
 	{ &vmalloc_base, VMALLOC_SIZE_TB },
+	{ &vmemmap_base, 1 },
 };
 
 /* Get size in bytes used by the memory region */
@@ -89,6 +99,18 @@ void __init kernel_randomize_memory(void)
 	struct rnd_state rand_state;
 	unsigned long remain_entropy;
 
+	/*
 +	 * All these BUILD_BUG_ON checks ensure the memory layout is
+	 * consistent with the vaddr_start/vaddr_end variables.
+	 */
+	BUILD_BUG_ON(vaddr_start >= vaddr_end);
+	BUILD_BUG_ON(config_enabled(CONFIG_X86_ESPFIX64) &&
+		     vaddr_end >= EFI_VA_START);
+	BUILD_BUG_ON((config_enabled(CONFIG_X86_ESPFIX64) ||
+		      config_enabled(CONFIG_EFI)) &&
+		     vaddr_end >= __START_KERNEL_map);
+	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
+
 	if (!kaslr_memory_enabled())
 		return;
 
-- 
2.8.0.rc3.226.g39d4020
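
As background for the { &vmemmap_base, 1 } entry above: kernel_randomize_memory()
walks the kaslr_regions[] table and gives each region a random, PUD-aligned
slice of the leftover address space between vaddr_start and vaddr_end. The
sketch below paraphrases that loop; region_bytes() and next_random() are
illustrative stand-ins, not actual kernel symbols (the real code uses a
prandom state seeded from kaslr_get_random_long()).

/*
 * Paraphrased sketch of the kernel_randomize_memory() loop in
 * arch/x86/mm/kaslr.c; not a verbatim quote of the kernel source.
 */
static unsigned long region_bytes(const struct kaslr_memory_region *r)
{
	return r->size_tb << TB_SHIFT;		/* TB_SHIFT == 40 */
}

static unsigned long next_random(void);		/* seeded-PRNG stand-in */

static void __init randomize_regions_sketch(void)
{
	unsigned long vaddr = vaddr_start;
	unsigned long remain_entropy = vaddr_end - vaddr_start;
	size_t i;

	/* Space occupied by the regions themselves is not entropy. */
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
		remain_entropy -= region_bytes(&kaslr_regions[i]);

	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
		/* Give each remaining region an equal share of entropy. */
		unsigned long entropy = remain_entropy /
					(ARRAY_SIZE(kaslr_regions) - i);

		/* Pick a random, PUD-aligned offset within that share. */
		entropy = (next_random() % (entropy + 1)) & PUD_MASK;
		vaddr += entropy;
		*kaslr_regions[i].base = vaddr;	/* e.g. sets vmemmap_base */

		/* Step past the region before placing the next one. */
		vaddr += region_bytes(&kaslr_regions[i]);
		remain_entropy -= entropy;
	}
}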

* Re: [PATCH] x86/mm: Enable KASLR for vmemmap memory region (x86_64)
From: Thomas Garnier @ 2016-08-01 17:09 UTC
  To: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Thomas Garnier, Kees Cook
  Cc: x86, LKML, kernel-hardening

On Wed, Jul 27, 2016 at 8:59 AM, Thomas Garnier <thgarnie@google.com> wrote:
> Add vmemmap in the list of randomized memory regions.
>
> The vmemmap region holds a representation of the physical memory (through
> a struct page array). An attacker could use this region to disclose the
> kernel memory layout (walking the page linked list).
>
> Signed-off-by: Thomas Garnier <thgarnie@google.com>
> Signed-off-by: Kees Cook <keescook@chromium.org>
> ---
> Missing patch that was not picked up by the tip bot for KASLR memory randomization.
> Resending after rebasing on tip and re-testing, as discussed with Ingo.
> Based on tip 4bcc8cf6ab5932cbb2511c8e18065e61b069f21c

Ingo: Any comments? Can you integrate it into tip?

> ---
>  arch/x86/include/asm/kaslr.h            |  1 +
>  arch/x86/include/asm/pgtable_64_types.h |  4 +++-
>  arch/x86/mm/kaslr.c                     | 24 +++++++++++++++++++++++-
>  3 files changed, 27 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/include/asm/kaslr.h b/arch/x86/include/asm/kaslr.h
> index 2674ee3..1052a79 100644
> --- a/arch/x86/include/asm/kaslr.h
> +++ b/arch/x86/include/asm/kaslr.h
> @@ -6,6 +6,7 @@ unsigned long kaslr_get_random_long(const char *purpose);
>  #ifdef CONFIG_RANDOMIZE_MEMORY
>  extern unsigned long page_offset_base;
>  extern unsigned long vmalloc_base;
> +extern unsigned long vmemmap_base;
>
>  void kernel_randomize_memory(void);
>  #else
> diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
> index 6fdef9e..3a26420 100644
> --- a/arch/x86/include/asm/pgtable_64_types.h
> +++ b/arch/x86/include/asm/pgtable_64_types.h
> @@ -57,11 +57,13 @@ typedef struct { pteval_t pte; } pte_t;
>  #define MAXMEM         _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
>  #define VMALLOC_SIZE_TB        _AC(32, UL)
>  #define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
> -#define VMEMMAP_START  _AC(0xffffea0000000000, UL)
> +#define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
>  #ifdef CONFIG_RANDOMIZE_MEMORY
>  #define VMALLOC_START  vmalloc_base
> +#define VMEMMAP_START  vmemmap_base
>  #else
>  #define VMALLOC_START  __VMALLOC_BASE
> +#define VMEMMAP_START  __VMEMMAP_BASE
>  #endif /* CONFIG_RANDOMIZE_MEMORY */
>  #define VMALLOC_END    (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
>  #define MODULES_VADDR    (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
> diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
> index 26dccd6..3e9875f 100644
> --- a/arch/x86/mm/kaslr.c
> +++ b/arch/x86/mm/kaslr.c
> @@ -44,13 +44,22 @@
>   * ensure that this order is correct and won't be changed.
>   */
>  static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
> -static const unsigned long vaddr_end = VMEMMAP_START;
> +
> +#if defined(CONFIG_X86_ESPFIX64)
> +static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
> +#elif defined(CONFIG_EFI)
> +static const unsigned long vaddr_end = EFI_VA_START;
> +#else
> +static const unsigned long vaddr_end = __START_KERNEL_map;
> +#endif
>
>  /* Default values */
>  unsigned long page_offset_base = __PAGE_OFFSET_BASE;
>  EXPORT_SYMBOL(page_offset_base);
>  unsigned long vmalloc_base = __VMALLOC_BASE;
>  EXPORT_SYMBOL(vmalloc_base);
> +unsigned long vmemmap_base = __VMEMMAP_BASE;
> +EXPORT_SYMBOL(vmemmap_base);
>
>  /*
>   * Memory regions randomized by KASLR (except modules that use a separate logic
> @@ -63,6 +72,7 @@ static __initdata struct kaslr_memory_region {
>  } kaslr_regions[] = {
>         { &page_offset_base, 64/* Maximum */ },
>         { &vmalloc_base, VMALLOC_SIZE_TB },
> +       { &vmemmap_base, 1 },
>  };
>
>  /* Get size in bytes used by the memory region */
> @@ -89,6 +99,18 @@ void __init kernel_randomize_memory(void)
>         struct rnd_state rand_state;
>         unsigned long remain_entropy;
>
> +       /*
> +        * All these BUILD_BUG_ON checks ensure the memory layout is
> +        * consistent with the vaddr_start/vaddr_end variables.
> +        */
> +       BUILD_BUG_ON(vaddr_start >= vaddr_end);
> +       BUILD_BUG_ON(config_enabled(CONFIG_X86_ESPFIX64) &&
> +                    vaddr_end >= EFI_VA_START);
> +       BUILD_BUG_ON((config_enabled(CONFIG_X86_ESPFIX64) ||
> +                     config_enabled(CONFIG_EFI)) &&
> +                    vaddr_end >= __START_KERNEL_map);
> +       BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
> +
>         if (!kaslr_memory_enabled())
>                 return;
>
> --
> 2.8.0.rc3.226.g39d4020
>

* Re: [PATCH] x86/mm: Enable KASLR for vmemmap memory region (x86_64)
From: Ingo Molnar @ 2016-08-02  8:14 UTC
  To: Thomas Garnier
  Cc: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Kees Cook, x86,
	LKML, kernel-hardening


* Thomas Garnier <thgarnie@google.com> wrote:

> On Wed, Jul 27, 2016 at 8:59 AM, Thomas Garnier <thgarnie@google.com> wrote:
> > Add vmemmap in the list of randomized memory regions.
> >
> > The vmemmap region holds a representation of the physical memory (through
> > a struct page array). An attacker could use this region to disclose the
> > kernel memory layout (walking the page linked list).
> >
> > Signed-off-by: Thomas Garnier <thgarnie@google.com>
> > Signed-off-by: Kees Cook <keescook@chromium.org>
> > ---
> > Missing patch that was not picked up by the tip bot for KASLR memory randomization.
> > Resending after rebasing on tip and re-testing, as discussed with Ingo.
> > Based on tip 4bcc8cf6ab5932cbb2511c8e18065e61b069f21c
> 
> Ingo: Any comments? Can you integrate it into tip?
> 
> > ---
> >  arch/x86/include/asm/kaslr.h            |  1 +
> >  arch/x86/include/asm/pgtable_64_types.h |  4 +++-
> >  arch/x86/mm/kaslr.c                     | 24 +++++++++++++++++++++++-
> >  3 files changed, 27 insertions(+), 2 deletions(-)

After the merge window is over. There's no bad effect from the lack of this patch,
other than a lower level of randomization of kernel virtual addresses, right?

Thanks,

	Ingo

* Re: [PATCH] x86/mm: Enable KASLR for vmemmap memory region (x86_64)
From: Thomas Garnier @ 2016-08-02 14:24 UTC
  To: Ingo Molnar
  Cc: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Kees Cook, x86,
	LKML, kernel-hardening

On Tue, Aug 2, 2016 at 1:14 AM, Ingo Molnar <mingo@kernel.org> wrote:
>
> * Thomas Garnier <thgarnie@google.com> wrote:
>
>> On Wed, Jul 27, 2016 at 8:59 AM, Thomas Garnier <thgarnie@google.com> wrote:
>> > Add vmemmap in the list of randomized memory regions.
>> >
>> > The vmemmap region holds a representation of the physical memory (through
>> > a struct page array). An attacker could use this region to disclose the
>> > kernel memory layout (walking the page linked list).
>> >
>> > Signed-off-by: Thomas Garnier <thgarnie@google.com>
>> > Signed-off-by: Kees Cook <keescook@chromium.org>
>> > ---
>> > Missing patch that was not picked up by the tip bot for KASLR memory randomization.
>> > Resending after rebasing on tip and re-testing, as discussed with Ingo.
>> > Based on tip 4bcc8cf6ab5932cbb2511c8e18065e61b069f21c
>>
>> Ingo: Any comments? Can you integrate it into tip?
>>
>> > ---
>> >  arch/x86/include/asm/kaslr.h            |  1 +
>> >  arch/x86/include/asm/pgtable_64_types.h |  4 +++-
>> >  arch/x86/mm/kaslr.c                     | 24 +++++++++++++++++++++++-
>> >  3 files changed, 27 insertions(+), 2 deletions(-)
>
> After the merge window is over. There's no bad effect from the lack of this patch,
> other than a lower level of randomization of kernel virtual addresses, right?
>

That's right, this change just enables randomization for vmemmap.

> Thanks,
>
>         Ingo
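
Since the patch exports the new base via EXPORT_SYMBOL(), its effect is easy
to observe from a small test module. A hypothetical sketch (an editor's
example, not from the thread), assuming CONFIG_RANDOMIZE_MEMORY=y and the
externs this patch adds to asm/kaslr.h; loading it across reboots should show
vmemmap_base changing:

/*
 * Hypothetical test module, not part of the thread: print the
 * randomized region bases. Assumes CONFIG_RANDOMIZE_MEMORY=y.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/kaslr.h>

static int __init kaslr_bases_init(void)
{
	pr_info("page_offset_base = 0x%lx\n", page_offset_base);
	pr_info("vmalloc_base     = 0x%lx\n", vmalloc_base);
	pr_info("vmemmap_base     = 0x%lx\n", vmemmap_base);
	return 0;
}

static void __exit kaslr_bases_exit(void)
{
}

module_init(kaslr_bases_init);
module_exit(kaslr_bases_exit);
MODULE_LICENSE("GPL");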

* [tip:x86/mm] x86/mm/64: Enable KASLR for vmemmap memory region
From: tip-bot for Thomas Garnier @ 2016-08-10 18:13 UTC
  To: linux-tip-commits
  Cc: jpoimboe, bp, linux-kernel, keescook, peterz, torvalds, tglx,
	mingo, dvlasenk, brgerst, hpa, luto, thgarnie

Commit-ID:  25dfe4785332723f09311dcb7fd91015a60c022f
Gitweb:     http://git.kernel.org/tip/25dfe4785332723f09311dcb7fd91015a60c022f
Author:     Thomas Garnier <thgarnie@google.com>
AuthorDate: Wed, 27 Jul 2016 08:59:56 -0700
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 10 Aug 2016 16:10:06 +0200

x86/mm/64: Enable KASLR for vmemmap memory region

Add vmemmap in the list of randomized memory regions.

The vmemmap region holds a representation of the physical memory (through
a struct page array). An attacker could use this region to disclose the
kernel memory layout (walking the page linked list).

Signed-off-by: Thomas Garnier <thgarnie@google.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kernel-hardening@lists.openwall.com
Link: http://lkml.kernel.org/r/1469635196-122447-1-git-send-email-thgarnie@google.com
[ Minor edits. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 arch/x86/include/asm/kaslr.h            |  1 +
 arch/x86/include/asm/pgtable_64_types.h |  4 +++-
 arch/x86/mm/kaslr.c                     | 26 ++++++++++++++++++++++++--
 3 files changed, 28 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/kaslr.h b/arch/x86/include/asm/kaslr.h
index 2674ee3..1052a79 100644
--- a/arch/x86/include/asm/kaslr.h
+++ b/arch/x86/include/asm/kaslr.h
@@ -6,6 +6,7 @@ unsigned long kaslr_get_random_long(const char *purpose);
 #ifdef CONFIG_RANDOMIZE_MEMORY
 extern unsigned long page_offset_base;
 extern unsigned long vmalloc_base;
+extern unsigned long vmemmap_base;
 
 void kernel_randomize_memory(void);
 #else
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 6fdef9e..3a26420 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -57,11 +57,13 @@ typedef struct { pteval_t pte; } pte_t;
 #define MAXMEM		_AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
 #define VMALLOC_SIZE_TB	_AC(32, UL)
 #define __VMALLOC_BASE	_AC(0xffffc90000000000, UL)
-#define VMEMMAP_START	_AC(0xffffea0000000000, UL)
+#define __VMEMMAP_BASE	_AC(0xffffea0000000000, UL)
 #ifdef CONFIG_RANDOMIZE_MEMORY
 #define VMALLOC_START	vmalloc_base
+#define VMEMMAP_START	vmemmap_base
 #else
 #define VMALLOC_START	__VMALLOC_BASE
+#define VMEMMAP_START	__VMEMMAP_BASE
 #endif /* CONFIG_RANDOMIZE_MEMORY */
 #define VMALLOC_END	(VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
 #define MODULES_VADDR    (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index ec8654f..aec03aa 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -40,17 +40,26 @@
  * You need to add an if/def entry if you introduce a new memory region
  * compatible with KASLR. Your entry must be in logical order with memory
  * layout. For example, ESPFIX is before EFI because its virtual address is
- * before. You also need to add a BUILD_BUG_ON in kernel_randomize_memory to
+ * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to
  * ensure that this order is correct and won't be changed.
  */
 static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
-static const unsigned long vaddr_end = VMEMMAP_START;
+
+#if defined(CONFIG_X86_ESPFIX64)
+static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
+#elif defined(CONFIG_EFI)
+static const unsigned long vaddr_end = EFI_VA_START;
+#else
+static const unsigned long vaddr_end = __START_KERNEL_map;
+#endif
 
 /* Default values */
 unsigned long page_offset_base = __PAGE_OFFSET_BASE;
 EXPORT_SYMBOL(page_offset_base);
 unsigned long vmalloc_base = __VMALLOC_BASE;
 EXPORT_SYMBOL(vmalloc_base);
+unsigned long vmemmap_base = __VMEMMAP_BASE;
+EXPORT_SYMBOL(vmemmap_base);
 
 /*
  * Memory regions randomized by KASLR (except modules that use a separate logic
@@ -63,6 +72,7 @@ static __initdata struct kaslr_memory_region {
 } kaslr_regions[] = {
 	{ &page_offset_base, 64/* Maximum */ },
 	{ &vmalloc_base, VMALLOC_SIZE_TB },
+	{ &vmemmap_base, 1 },
 };
 
 /* Get size in bytes used by the memory region */
@@ -89,6 +99,18 @@ void __init kernel_randomize_memory(void)
 	struct rnd_state rand_state;
 	unsigned long remain_entropy;
 
+	/*
 +	 * All these BUILD_BUG_ON checks ensure the memory layout is
+	 * consistent with the vaddr_start/vaddr_end variables.
+	 */
+	BUILD_BUG_ON(vaddr_start >= vaddr_end);
+	BUILD_BUG_ON(config_enabled(CONFIG_X86_ESPFIX64) &&
+		     vaddr_end >= EFI_VA_START);
+	BUILD_BUG_ON((config_enabled(CONFIG_X86_ESPFIX64) ||
+		      config_enabled(CONFIG_EFI)) &&
+		     vaddr_end >= __START_KERNEL_map);
+	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
+
 	if (!kaslr_memory_enabled())
 		return;
 


Thread overview:
2016-07-27 15:59 [PATCH] x86/mm: Enable KASLR for vmemmap memory region (x86_64) Thomas Garnier
2016-08-01 17:09 ` Thomas Garnier
2016-08-02  8:14   ` Ingo Molnar
2016-08-02 14:24     ` Thomas Garnier
2016-08-10 18:13 ` [tip:x86/mm] x86/mm/64: Enable KASLR for vmemmap memory region tip-bot for Thomas Garnier
