From: Andy Lutomirski <luto@kernel.org>
To: x86@kernel.org, linux-kernel@vger.kernel.org
Cc: linux-arch@vger.kernel.org, Borislav Petkov <bp@alien8.de>,
	Nadav Amit <nadav.amit@gmail.com>,
	Kees Cook <keescook@chromium.org>,
	Brian Gerst <brgerst@gmail.com>,
	"kernel-hardening@lists.openwall.com" 
	<kernel-hardening@lists.openwall.com>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Josh Poimboeuf <jpoimboe@redhat.com>, Jann Horn <jann@thejh.net>,
	Heiko Carstens <heiko.carstens@de.ibm.com>,
	Andy Lutomirski <luto@kernel.org>
Subject: [PATCH v3 06/13] fork: Add generic vmalloced stack support
Date: Mon, 20 Jun 2016 16:43:36 -0700	[thread overview]
Message-ID: <f7855f9eae0a27f5a03db1291f46fea1cc0a2a3f.1466466093.git.luto@kernel.org> (raw)
In-Reply-To: <cover.1466466093.git.luto@kernel.org>

If CONFIG_VMAP_STACK is selected, kernel stacks are allocated with
__vmalloc_node_range() instead of the page allocator.  Stacks in
vmalloc space are bounded by unmapped guard pages, so an overflow
faults immediately instead of silently corrupting whatever happens
to be adjacent in memory.
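
For reference, a minimal sketch of what the new allocation path in
kernel/fork.c below boils down to (this condenses the real hunk rather
than replacing it; the !CONFIG_VMAP_STACK fallback and the helper name
alloc_vmapped_stack() are illustrative, and error handling is omitted):

#include <linux/sched.h>
#include <linux/vmalloc.h>

static struct thread_info *alloc_vmapped_stack(struct task_struct *tsk,
					       int node)
{
	/*
	 * A THREAD_SIZE-aligned, page-backed mapping in
	 * [VMALLOC_START, VMALLOC_END).  vmalloc space leaves an
	 * unmapped guard hole after each area, so running off the end
	 * of the stack faults instead of scribbling on a neighbor.
	 */
	struct thread_info *ti = __vmalloc_node_range(
		THREAD_SIZE, THREAD_SIZE, VMALLOC_START, VMALLOC_END,
		THREADINFO_GFP | __GFP_HIGHMEM, PAGE_KERNEL,
		0, node, __builtin_return_address(0));

	/*
	 * find_vm_area() cannot be called in interrupt context, but
	 * the stack can be freed from one, so look up and cache the
	 * vm_struct at allocation time.
	 */
	if (ti)
		tsk->stack_vm_area = find_vm_area(ti);
	return ti;
}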

Signed-off-by: Andy Lutomirski <luto@kernel.org>
---
 arch/Kconfig                        | 29 +++++++++++++
 arch/ia64/include/asm/thread_info.h |  2 +-
 include/linux/sched.h               | 15 +++++++
 kernel/fork.c                       | 82 +++++++++++++++++++++++++++++--------
 4 files changed, 110 insertions(+), 18 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index d794384a0404..a71e6e7195e6 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -658,4 +658,33 @@ config ARCH_NO_COHERENT_DMA_MMAP
 config CPU_NO_EFFICIENT_FFS
 	def_bool n
 
+config HAVE_ARCH_VMAP_STACK
+	def_bool n
+	help
+	  An arch should select this symbol if it can support kernel stacks
+	  in vmalloc space.  This means:
+
+	  - vmalloc space must be large enough to hold many kernel stacks.
+	    This may rule out many 32-bit architectures.
+
+	  - Stacks in vmalloc space need to work reliably.  For example, if
+	    vmap page tables are created on demand, either this mechanism
+	    needs to work while the stack points to a virtual address with
+	    unpopulated page tables or arch code (switch_to and switch_mm,
+	    most likely) needs to ensure that the stack's page table entries
+	    are populated before running on a possibly unpopulated stack.
+
+	  - If the stack overflows into a guard page, something reasonable
+	    should happen.  The definition of "reasonable" is flexible, but
+	    instantly rebooting without logging anything would be unfriendly.
+
+config VMAP_STACK
+	bool "Use a virtually-mapped stack"
+	depends on HAVE_ARCH_VMAP_STACK
+	---help---
+	  Enable this if you want to use virtually-mapped kernel stacks
+	  with guard pages.  This causes kernel stack overflows to be
+	  caught immediately rather than causing difficult-to-diagnose
+	  corruption.
+
 source "kernel/gcov/Kconfig"
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index aa995b67c3f5..d13edda6e09c 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -56,7 +56,7 @@ struct thread_info {
 #define alloc_thread_info_node(tsk, node)	((struct thread_info *) 0)
 #define task_thread_info(tsk)	((struct thread_info *) 0)
 #endif
-#define free_thread_info(ti)	/* nothing */
+#define free_thread_info(tsk)	/* nothing */
 #define task_stack_page(tsk)	((void *)(tsk))
 
 #define __HAVE_THREAD_FUNCTIONS
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e42ada26345..a37c3b790309 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1918,6 +1918,9 @@ struct task_struct {
 #ifdef CONFIG_MMU
 	struct task_struct *oom_reaper_list;
 #endif
+#ifdef CONFIG_VMAP_STACK
+	struct vm_struct *stack_vm_area;
+#endif
 /* CPU-specific state of this task */
 	struct thread_struct thread;
 /*
@@ -1934,6 +1937,18 @@ extern int arch_task_struct_size __read_mostly;
 # define arch_task_struct_size (sizeof(struct task_struct))
 #endif
 
+#ifdef CONFIG_VMAP_STACK
+static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
+{
+	return t->stack_vm_area;
+}
+#else
+static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
+{
+	return NULL;
+}
+#endif
+
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
diff --git a/kernel/fork.c b/kernel/fork.c
index ff3c41c2ba96..fe1c785e5f8c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -158,19 +158,38 @@ void __weak arch_release_thread_info(struct thread_info *ti)
  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
  * kmemcache based allocator.
  */
-# if THREAD_SIZE >= PAGE_SIZE
+# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
 						  int node)
 {
+#ifdef CONFIG_VMAP_STACK
+	struct thread_info *ti = __vmalloc_node_range(
+		THREAD_SIZE, THREAD_SIZE, VMALLOC_START, VMALLOC_END,
+		THREADINFO_GFP | __GFP_HIGHMEM, PAGE_KERNEL,
+		0, node, __builtin_return_address(0));
+
+	/*
+	 * We can't call find_vm_area() in interrupt context, and
+	 * free_thread_info can be called in interrupt context, so cache
+	 * the vm_struct.
+	 */
+	if (ti)
+		tsk->stack_vm_area = find_vm_area(ti);
+	return ti;
+#else
 	struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
 						  THREAD_SIZE_ORDER);
 
 	return page ? page_address(page) : NULL;
+#endif
 }
 
-static inline void free_thread_info(struct thread_info *ti)
+static inline void free_thread_info(struct task_struct *tsk)
 {
-	free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+	if (task_stack_vm_area(tsk))
+		vfree(tsk->stack);
+	else
+		free_kmem_pages((unsigned long)tsk->stack, THREAD_SIZE_ORDER);
 }
 # else
 static struct kmem_cache *thread_info_cache;
@@ -181,9 +200,9 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
 	return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
 }
 
-static void free_thread_info(struct thread_info *ti)
+static void free_thread_info(struct task_struct *tsk)
 {
-	kmem_cache_free(thread_info_cache, ti);
+	kmem_cache_free(thread_info_cache, tsk->stack);
 }
 
 void thread_info_cache_init(void)
@@ -213,24 +232,47 @@ struct kmem_cache *vm_area_cachep;
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
-static void account_kernel_stack(struct thread_info *ti, int account)
+static void account_kernel_stack(struct task_struct *tsk, int account)
 {
-	struct zone *zone = page_zone(virt_to_page(ti));
+	struct zone *zone;
+	struct thread_info *ti = task_thread_info(tsk);
+	struct vm_struct *vm = task_stack_vm_area(tsk);
+
+	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);
+
+	if (vm) {
+		int i;
 
-	mod_zone_page_state(zone, NR_KERNEL_STACK_KB,
-			    THREAD_SIZE / 1024 * account);
+		BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
 
-	/* All stack pages belong to the same memcg. */
-	memcg_kmem_update_page_stat(
-		virt_to_page(ti), MEMCG_KERNEL_STACK_KB,
-		account * (THREAD_SIZE / 1024));
+		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
+			mod_zone_page_state(page_zone(vm->pages[i]),
+					    NR_KERNEL_STACK_KB,
+					    PAGE_SIZE / 1024 * account);
+		}
+
+		/* All stack pages belong to the same memcg. */
+		memcg_kmem_update_page_stat(
+			vm->pages[0], MEMCG_KERNEL_STACK_KB,
+			account * (THREAD_SIZE / 1024));
+	} else {
+		zone = page_zone(virt_to_page(ti));
+
+		mod_zone_page_state(zone, NR_KERNEL_STACK_KB,
+				    THREAD_SIZE / 1024 * account);
+
+		/* All stack pages belong to the same memcg. */
+		memcg_kmem_update_page_stat(
+			virt_to_page(ti), MEMCG_KERNEL_STACK_KB,
+			account * (THREAD_SIZE / 1024));
+	}
 }
 
 void free_task(struct task_struct *tsk)
 {
-	account_kernel_stack(tsk->stack, -1);
+	account_kernel_stack(tsk, -1);
 	arch_release_thread_info(tsk->stack);
-	free_thread_info(tsk->stack);
+	free_thread_info(tsk);
 	rt_mutex_debug_task_free(tsk);
 	ftrace_graph_exit_task(tsk);
 	put_seccomp_filter(tsk);
@@ -342,6 +384,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 {
 	struct task_struct *tsk;
 	struct thread_info *ti;
+	struct vm_struct *stack_vm_area;
 	int err;
 
 	if (node == NUMA_NO_NODE)
@@ -354,11 +397,16 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	if (!ti)
 		goto free_tsk;
 
+	stack_vm_area = task_stack_vm_area(tsk);
+
 	err = arch_dup_task_struct(tsk, orig);
 	if (err)
 		goto free_ti;
 
 	tsk->stack = ti;
+#ifdef CONFIG_VMAP_STACK
+	tsk->stack_vm_area = stack_vm_area;
+#endif
 #ifdef CONFIG_SECCOMP
 	/*
 	 * We must handle setting up seccomp filters once we're under
@@ -390,14 +438,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->task_frag.page = NULL;
 	tsk->wake_q.next = NULL;
 
-	account_kernel_stack(ti, 1);
+	account_kernel_stack(tsk, 1);
 
 	kcov_task_init(tsk);
 
 	return tsk;
 
 free_ti:
-	free_thread_info(ti);
+	free_thread_info(tsk);
 free_tsk:
 	free_task_struct(tsk);
 	return NULL;
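
The HAVE_ARCH_VMAP_STACK help text above asks that, on architectures
that populate vmalloc page tables lazily, either the lazy-fault path
works with an unpopulated stack or the arch populates the new stack's
page-table entries before switching to it.  One way to do the latter
is to probe the new stack while still running on the old one, so any
fault is taken at a point where there is a good stack to handle it
on -- roughly what the x86_64 enablement later in this series does.
A sketch (prepare_switch_to() and thread.sp are x86 details used here
for illustration; other architectures would hook their own
context-switch path):

#include <linux/compiler.h>
#include <linux/sched.h>

static inline void prepare_switch_to(struct task_struct *prev,
				     struct task_struct *next)
{
#ifdef CONFIG_VMAP_STACK
	/*
	 * Touch one byte of the next task's stack.  If its page-table
	 * entries have not yet been synced into the current mm, this
	 * takes an ordinary #PF that the vmalloc fault handler can fix
	 * up -- taken here, on a known-good stack, rather than after
	 * the switch, where the same fault would have no usable stack
	 * to run on.
	 */
	READ_ONCE(*(unsigned char *)next->thread.sp);
#endif
}

An architecture opts in by selecting HAVE_ARCH_VMAP_STACK from its
Kconfig; the user-visible CONFIG_VMAP_STACK option then becomes
available on top of it.
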
-- 
2.5.5
