linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v10] x86/KASLR: Clarify identity map interface
@ 2016-06-15 19:03 Kees Cook
  2016-06-15 23:23 ` Yinghai Lu
  0 siblings, 1 reply; 3+ messages in thread
From: Kees Cook @ 2016-06-15 19:03 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: linux-kernel, x86, Baoquan He, Borislav Petkov, Yinghai Lu,
	Andy Lutomirski, Thomas Garnier

This extracts the call to prepare_level4() into a top-level function
that the user of the pagetable.c interface must call to initialize
the new page tables. For clarity and to match the "finalize" function,
it has been renamed to initialize_identity_maps(). This function also
gains the initialization of mapping_info so we don't have to do it each
time in add_identity_map().

Additionally add copyright notice to the top, to make it clear that the
bulk of the pagetable.c code was written by Yinghai, and that I just
added bugs later. :)

Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: Borislav Petkov <bp@suse.de>
---
This is a resend, for -next
---
 arch/x86/boot/compressed/kaslr.c     |  3 +++
 arch/x86/boot/compressed/misc.h      |  3 +++
 arch/x86/boot/compressed/pagetable.c | 26 ++++++++++++++++----------
 3 files changed, 22 insertions(+), 10 deletions(-)

diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index cfeb0259ed81..03a6f5d85a6b 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -485,6 +485,9 @@ unsigned char *choose_random_location(unsigned long input,
 
 	boot_params->hdr.loadflags |= KASLR_FLAG;
 
+	/* Prepare to add new identity pagetables on demand. */
+	initialize_identity_maps();
+
 	/* Record the various known unsafe memory ranges. */
 	mem_avoid_init(input, input_size, output);
 
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index b6fec1ff10e4..09c4ddd02ac6 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -85,10 +85,13 @@ unsigned char *choose_random_location(unsigned long input_ptr,
 #endif
 
 #ifdef CONFIG_X86_64
+void initialize_identity_maps(void);
 void add_identity_map(unsigned long start, unsigned long size);
 void finalize_identity_maps(void);
 extern unsigned char _pgtable[];
 #else
+static inline void initialize_identity_maps(void)
+{ }
 static inline void add_identity_map(unsigned long start, unsigned long size)
 { }
 static inline void finalize_identity_maps(void)
diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
index 34b95df14e69..6e31a6aac4d3 100644
--- a/arch/x86/boot/compressed/pagetable.c
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -2,6 +2,9 @@
  * This code is used on x86_64 to create page table identity mappings on
  * demand by building up a new set of page tables (or appending to the
  * existing ones), and then switching over to them when ready.
+ *
+ * Copyright (C) 2015-2016  Yinghai Lu
+ * Copyright (C)      2016  Kees Cook
  */
 
 /*
@@ -59,9 +62,21 @@ static struct alloc_pgt_data pgt_data;
 /* The top level page table entry pointer. */
 static unsigned long level4p;
 
+/*
+ * Mapping information structure passed to kernel_ident_mapping_init().
+ * Due to relocation, pointers must be assigned at run time not build time.
+ */
+static struct x86_mapping_info mapping_info = {
+	.pmd_flag       = __PAGE_KERNEL_LARGE_EXEC,
+};
+
 /* Locates and clears a region for a new top level page table. */
-static void prepare_level4(void)
+void initialize_identity_maps(void)
 {
+	/* Init mapping_info with run-time function/buffer pointers. */
+	mapping_info.alloc_pgt_page = alloc_pgt_page;
+	mapping_info.context = &pgt_data;
+
 	/*
 	 * It should be impossible for this not to already be true,
 	 * but since calling this a second time would rewind the other
@@ -96,17 +111,8 @@ static void prepare_level4(void)
  */
 void add_identity_map(unsigned long start, unsigned long size)
 {
-	struct x86_mapping_info mapping_info = {
-		.alloc_pgt_page	= alloc_pgt_page,
-		.context	= &pgt_data,
-		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
-	};
 	unsigned long end = start + size;
 
-	/* Make sure we have a top level page table ready to use. */
-	if (!level4p)
-		prepare_level4();
-
 	/* Align boundary to 2M. */
 	start = round_down(start, PMD_SIZE);
 	end = round_up(end, PMD_SIZE);
-- 
2.7.4


-- 
Kees Cook
Chrome OS & Brillo Security

^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH v10] x86/KASLR: Clarify identity map interface
  2016-06-15 19:03 [PATCH v10] x86/KASLR: Clarify identity map interface Kees Cook
@ 2016-06-15 23:23 ` Yinghai Lu
  2016-06-15 23:25   ` Kees Cook
  0 siblings, 1 reply; 3+ messages in thread
From: Yinghai Lu @ 2016-06-15 23:23 UTC (permalink / raw)
  To: Kees Cook
  Cc: Ingo Molnar, Linux Kernel Mailing List, the arch/x86 maintainers,
	Baoquan He, Borislav Petkov, Andy Lutomirski, Thomas Garnier

On Wed, Jun 15, 2016 at 12:03 PM, Kees Cook <keescook@chromium.org> wrote:
> index cfeb0259ed81..03a6f5d85a6b 100644
> --- a/arch/x86/boot/compressed/kaslr.c
> +++ b/arch/x86/boot/compressed/kaslr.c
> @@ -485,6 +485,9 @@ unsigned char *choose_random_location(unsigned long input,
>
>         boot_params->hdr.loadflags |= KASLR_FLAG;
>
> +       /* Prepare to add new identity pagetables on demand. */
> +       initialize_identity_maps();
> +
>         /* Record the various known unsafe memory ranges. */
>         mem_avoid_init(input, input_size, output);
>
...
>
> -       /* Make sure we have a top level page table ready to use. */
> -       if (!level4p)
> -               prepare_level4();
> -
>         /* Align boundary to 2M. */
>         start = round_down(start, PMD_SIZE);
>         end = round_up(end, PMD_SIZE);

it is good to avoid that checking.

BTW, can you continue to simplify mem_avoid_init()?
something like:

Index: linux-2.6/arch/x86/boot/compressed/kaslr.c
===================================================================
--- linux-2.6.orig/arch/x86/boot/compressed/kaslr.c
+++ linux-2.6/arch/x86/boot/compressed/kaslr.c
@@ -122,16 +122,6 @@ struct mem_vector {
        unsigned long size;
 };

-enum mem_avoid_index {
-       MEM_AVOID_ZO_RANGE = 0,
-       MEM_AVOID_INITRD,
-       MEM_AVOID_CMDLINE,
-       MEM_AVOID_BOOTPARAMS,
-       MEM_AVOID_MAX,
-};
-
-static struct mem_vector mem_avoid[MEM_AVOID_MAX];
-
 static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
 {
        /* Item at least partially before region. */
@@ -154,6 +144,25 @@ static bool mem_overlaps(struct mem_vect
        return true;
 }

+#define MEM_AVOID_MAX 4
+static int avoid_count;
+static struct mem_vector mem_avoid[MEM_AVOID_MAX];
+static void mem_avoid_add_map(unsigned long start, unsigned long size,
+                               int add_map)
+{
+       if (avoid_count >= ARRAY_SIZE(mem_avoid)) {
+               warn("KASLR disabled: mem_avoid too small");
+               return;
+       }
+       mem_avoid[avoid_count].start = start;
+       mem_avoid[avoid_count].size = size;
+       if (add_map)
+               add_identity_map(start, size);
+
+       avoid_count++;
+}
+
 /*
  * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
  * The mem_avoid array is used to store the ranges that need to be avoided
@@ -240,19 +249,15 @@ static void mem_avoid_init(unsigned long
         * Avoid the region that is unsafe to overlap during
         * decompression.
         */
-       mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
-       mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
-       add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
-                        mem_avoid[MEM_AVOID_ZO_RANGE].size);
+       mem_avoid_add_map(input, (output + init_size) - input, 1);

        /* Avoid initrd. */
        initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
        initrd_start |= boot_params->hdr.ramdisk_image;
        initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
        initrd_size |= boot_params->hdr.ramdisk_size;
-       mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
-       mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
        /* No need to set mapping for initrd, it will be handled in VO. */
+       mem_avoid_add_map(initrd_start, initrd_size, 0);

        /* Avoid kernel command line. */
        cmd_line  = (u64)boot_params->ext_cmd_line_ptr << 32;
@@ -261,16 +266,10 @@ static void mem_avoid_init(unsigned long
        ptr = (char *)(unsigned long)cmd_line;
        for (cmd_line_size = 0; ptr[cmd_line_size++]; )
                ;
-       mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
-       mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
-       add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
-                        mem_avoid[MEM_AVOID_CMDLINE].size);
+       mem_avoid_add_map(cmd_line, cmd_line_size, 1);

        /* Avoid boot parameters. */
-       mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
-       mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
-       add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
-                        mem_avoid[MEM_AVOID_BOOTPARAMS].size);
+       mem_avoid_add_map((unsigned long)boot_params, sizeof(*boot_params), 1);

        /* We don't need to set a mapping for setup_data. */

@@ -292,7 +291,7 @@ static bool mem_avoid_overlap(struct mem
        unsigned long earliest = img->start + img->size;
        bool is_overlapping = false;

-       for (i = 0; i < MEM_AVOID_MAX; i++) {
+       for (i = 0; i < avoid_count; i++) {
                if (mem_overlaps(img, &mem_avoid[i]) &&
                    mem_avoid[i].start < earliest) {
                        *overlap = mem_avoid[i];

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH v10] x86/KASLR: Clarify identity map interface
  2016-06-15 23:23 ` Yinghai Lu
@ 2016-06-15 23:25   ` Kees Cook
  0 siblings, 0 replies; 3+ messages in thread
From: Kees Cook @ 2016-06-15 23:25 UTC (permalink / raw)
  To: Yinghai Lu
  Cc: Ingo Molnar, Linux Kernel Mailing List, the arch/x86 maintainers,
	Baoquan He, Borislav Petkov, Andy Lutomirski, Thomas Garnier

On Wed, Jun 15, 2016 at 4:23 PM, Yinghai Lu <yinghai@kernel.org> wrote:
> On Wed, Jun 15, 2016 at 12:03 PM, Kees Cook <keescook@chromium.org> wrote:
>> index cfeb0259ed81..03a6f5d85a6b 100644
>> --- a/arch/x86/boot/compressed/kaslr.c
>> +++ b/arch/x86/boot/compressed/kaslr.c
>> @@ -485,6 +485,9 @@ unsigned char *choose_random_location(unsigned long input,
>>
>>         boot_params->hdr.loadflags |= KASLR_FLAG;
>>
>> +       /* Prepare to add new identity pagetables on demand. */
>> +       initialize_identity_maps();
>> +
>>         /* Record the various known unsafe memory ranges. */
>>         mem_avoid_init(input, input_size, output);
>>
> ...
>>
>> -       /* Make sure we have a top level page table ready to use. */
>> -       if (!level4p)
>> -               prepare_level4();
>> -
>>         /* Align boundary to 2M. */
>>         start = round_down(start, PMD_SIZE);
>>         end = round_up(end, PMD_SIZE);
>
> it is good to avoid that checking.
>
> BTW, can you continue to simplify mem_avoid_init()?
> something like:

Ah, that would be a nice improvement, yes. Let's pursue this after the
rest of the stack lands first.

-Kees

>
> Index: linux-2.6/arch/x86/boot/compressed/kaslr.c
> ===================================================================
> --- linux-2.6.orig/arch/x86/boot/compressed/kaslr.c
> +++ linux-2.6/arch/x86/boot/compressed/kaslr.c
> @@ -122,16 +122,6 @@ struct mem_vector {
>         unsigned long size;
>  };
>
> -enum mem_avoid_index {
> -       MEM_AVOID_ZO_RANGE = 0,
> -       MEM_AVOID_INITRD,
> -       MEM_AVOID_CMDLINE,
> -       MEM_AVOID_BOOTPARAMS,
> -       MEM_AVOID_MAX,
> -};
> -
> -static struct mem_vector mem_avoid[MEM_AVOID_MAX];
> -
>  static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
>  {
>         /* Item at least partially before region. */
> @@ -154,6 +144,25 @@ static bool mem_overlaps(struct mem_vect
>         return true;
>  }
>
> +#define MEM_AVOID_MAX 4
> +static int avoid_count;
> +static struct mem_vector mem_avoid[MEM_AVOID_MAX];
> +static void mem_avoid_add_map(unsigned long start, unsigned long size,
> +                               int add_map)
> +{
> +       if (avoid_count >= ARRAY_SIZE(mem_avoid)) {
> +               warn("KASLR disabled: mem_avoid too small");
> +               return;
> +       }
> +       mem_avoid[avoid_count].start = start;
> +       mem_avoid[avoid_count].size = size;
> +       if (add_map)
> +               add_identity_map(start, size);
> +
> +       avoid_count++;
> +}
> +
>  /*
>   * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
>   * The mem_avoid array is used to store the ranges that need to be avoided
> @@ -240,19 +249,15 @@ static void mem_avoid_init(unsigned long
>          * Avoid the region that is unsafe to overlap during
>          * decompression.
>          */
> -       mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
> -       mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
> -       add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
> -                        mem_avoid[MEM_AVOID_ZO_RANGE].size);
> +       mem_avoid_add_map(input, (output + init_size) - input, 1);
>
>         /* Avoid initrd. */
>         initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
>         initrd_start |= boot_params->hdr.ramdisk_image;
>         initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
>         initrd_size |= boot_params->hdr.ramdisk_size;
> -       mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
> -       mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
>         /* No need to set mapping for initrd, it will be handled in VO. */
> +       mem_avoid_add_map(initrd_start, initrd_size, 0);
>
>         /* Avoid kernel command line. */
>         cmd_line  = (u64)boot_params->ext_cmd_line_ptr << 32;
> @@ -261,16 +266,10 @@ static void mem_avoid_init(unsigned long
>         ptr = (char *)(unsigned long)cmd_line;
>         for (cmd_line_size = 0; ptr[cmd_line_size++]; )
>                 ;
> -       mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
> -       mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
> -       add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
> -                        mem_avoid[MEM_AVOID_CMDLINE].size);
> +       mem_avoid_add_map(cmd_line, cmd_line_size, 1);
>
>         /* Avoid boot parameters. */
> -       mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
> -       mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
> -       add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
> -                        mem_avoid[MEM_AVOID_BOOTPARAMS].size);
> +       mem_avoid_add_map((unsigned long)boot_params, sizeof(*boot_params), 1);
>
>         /* We don't need to set a mapping for setup_data. */
>
> @@ -292,7 +291,7 @@ static bool mem_avoid_overlap(struct mem
>         unsigned long earliest = img->start + img->size;
>         bool is_overlapping = false;
>
> -       for (i = 0; i < MEM_AVOID_MAX; i++) {
> +       for (i = 0; i < avoid_count; i++) {
>                 if (mem_overlaps(img, &mem_avoid[i]) &&
>                     mem_avoid[i].start < earliest) {
>                         *overlap = mem_avoid[i];



-- 
Kees Cook
Chrome OS & Brillo Security

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2016-06-15 23:25 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-06-15 19:03 [PATCH v10] x86/KASLR: Clarify identity map interface Kees Cook
2016-06-15 23:23 ` Yinghai Lu
2016-06-15 23:25   ` Kees Cook

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).