From: Andy Lutomirski <luto@kernel.org>
To: x86@kernel.org, linux-kernel@vger.kernel.org
Cc: linux-arch@vger.kernel.org, Borislav Petkov <bp@alien8.de>,
	Nadav Amit <nadav.amit@gmail.com>,
	Kees Cook <keescook@chromium.org>,
	Brian Gerst <brgerst@gmail.com>,
	"kernel-hardening@lists.openwall.com" 
	<kernel-hardening@lists.openwall.com>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Josh Poimboeuf <jpoimboe@redhat.com>, Jann Horn <jann@thejh.net>,
	Heiko Carstens <heiko.carstens@de.ibm.com>,
	Andy Lutomirski <luto@kernel.org>,
	Oleg Nesterov <oleg@redhat.com>
Subject: [PATCH v5 07/32] fork: Add generic vmalloced stack support
Date: Mon, 11 Jul 2016 13:53:40 -0700
Message-ID: <51f37059777910f989486657d1e27ded4c7edad2.1468270393.git.luto@kernel.org>
In-Reply-To: <cover.1468270393.git.luto@kernel.org>

If CONFIG_VMAP_STACK is selected, kernel stacks are allocated from
vmalloc space via __vmalloc_node_range(), so each stack picks up
vmalloc's guard pages and an overflow faults immediately instead of
silently corrupting whatever sits next to the stack.

grsecurity has had a similar feature (called
GRKERNSEC_KSTACKOVERFLOW) for a long time.
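
For reference, the heart of the change is the allocation in the
kernel/fork.c hunk further down; restated here as a sketch (same
symbols as the hunk, nothing new):

	/*
	 * Allocate a THREAD_SIZE-sized, THREAD_SIZE-aligned stack in
	 * vmalloc space; the unmapped guard page vmalloc leaves after
	 * each area is what catches overflows.
	 */
	void *stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
					   VMALLOC_START, VMALLOC_END,
					   THREADINFO_GFP | __GFP_HIGHMEM,
					   PAGE_KERNEL, 0, node,
					   __builtin_return_address(0));
	/* Cache the vm_struct: free_thread_stack() may run in IRQ context. */
	if (stack)
		tsk->stack_vm_area = find_vm_area(stack);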

Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
---
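Note on the "!KASAN" dependency in the Kconfig hunk: KASAN locates the
shadow byte for an address with a fixed linear mapping, essentially (a
sketch of kasan_mem_to_shadow(), using the existing constants):

	shadow = ((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		 + KASAN_SHADOW_OFFSET;

As the help text below says, that mapping is not valid for stacks
placed in vmalloc space, so the two features are mutually exclusive
for now.
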
 arch/Kconfig                        | 33 +++++++++++++
 arch/ia64/include/asm/thread_info.h |  2 +-
 include/linux/sched.h               | 15 ++++++
 kernel/fork.c                       | 96 +++++++++++++++++++++++++++++--------
 4 files changed, 125 insertions(+), 21 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index 15996290fed4..14398fcf8292 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -661,4 +661,37 @@ config ARCH_NO_COHERENT_DMA_MMAP
 config CPU_NO_EFFICIENT_FFS
 	def_bool n
 
+config HAVE_ARCH_VMAP_STACK
+	def_bool n
+	help
+	  An arch should select this symbol if it can support kernel stacks
+	  in vmalloc space.  This means:
+
+	  - vmalloc space must be large enough to hold many kernel stacks.
+	    This may rule out many 32-bit architectures.
+
+	  - Stacks in vmalloc space need to work reliably.  For example, if
+	    vmap page tables are created on demand, either this mechanism
+	    needs to work while the stack points to a virtual address with
+	    unpopulated page tables, or arch code (switch_to and switch_mm,
+	    most likely) needs to ensure that the stack's page table entries
+	    are populated before running on a possibly unpopulated stack.
+
+	  - If the stack overflows into a guard page, something reasonable
+	    should happen.  The definition of "reasonable" is flexible, but
+	    instantly rebooting without logging anything would be unfriendly.
+
+config VMAP_STACK
+	bool "Use a virtually-mapped stack"
+	depends on HAVE_ARCH_VMAP_STACK && !KASAN
+	---help---
+	  Enable this if you want to use virtually-mapped kernel stacks
+	  with guard pages.  This causes kernel stack overflows to be
+	  caught immediately rather than causing difficult-to-diagnose
+	  corruption.
+
+	  This is presently incompatible with KASAN because KASAN expects
+	  the stack to map directly to the KASAN shadow map using a formula
+	  that is incorrect if the stack is in vmalloc space.
+
 source "kernel/gcov/Kconfig"
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index d1212b84fb83..f0a72e98e5a4 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -56,7 +56,7 @@ struct thread_info {
 #define alloc_thread_stack_node(tsk, node)	((unsigned long *) 0)
 #define task_thread_info(tsk)	((struct thread_info *) 0)
 #endif
-#define free_thread_stack(ti)	/* nothing */
+#define free_thread_stack(tsk)	/* nothing */
 #define task_stack_page(tsk)	((void *)(tsk))
 
 #define __HAVE_THREAD_FUNCTIONS
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 253538f29ade..26869dba21f1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1918,6 +1918,9 @@ struct task_struct {
 #ifdef CONFIG_MMU
 	struct task_struct *oom_reaper_list;
 #endif
+#ifdef CONFIG_VMAP_STACK
+	struct vm_struct *stack_vm_area;
+#endif
 /* CPU-specific state of this task */
 	struct thread_struct thread;
 /*
@@ -1934,6 +1937,18 @@ extern int arch_task_struct_size __read_mostly;
 # define arch_task_struct_size (sizeof(struct task_struct))
 #endif
 
+#ifdef CONFIG_VMAP_STACK
+static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
+{
+	return t->stack_vm_area;
+}
+#else
+static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
+{
+	return NULL;
+}
+#endif
+
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 146c9840c079..d2a64a0413be 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -158,19 +158,39 @@ void __weak arch_release_thread_stack(unsigned long *stack)
  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
  * kmemcache based allocator.
  */
-# if THREAD_SIZE >= PAGE_SIZE
-static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
-						  int node)
+# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
 {
+#ifdef CONFIG_VMAP_STACK
+	void *stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
+					   VMALLOC_START, VMALLOC_END,
+					   THREADINFO_GFP | __GFP_HIGHMEM,
+					   PAGE_KERNEL,
+					   0, node,
+					   __builtin_return_address(0));
+
+	/*
+	 * We can't call find_vm_area() in interrupt context, and
+	 * free_thread_stack can be called in interrupt context, so cache
+	 * the vm_struct.
+	 */
+	if (stack)
+		tsk->stack_vm_area = find_vm_area(stack);
+	return stack;
+#else
 	struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
 						  THREAD_SIZE_ORDER);
 
 	return page ? page_address(page) : NULL;
+#endif
 }
 
-static inline void free_thread_stack(unsigned long *stack)
+static inline void free_thread_stack(struct task_struct *tsk)
 {
-	free_kmem_pages((unsigned long)stack, THREAD_SIZE_ORDER);
+	if (task_stack_vm_area(tsk))
+		vfree(tsk->stack);
+	else
+		free_kmem_pages((unsigned long)tsk->stack, THREAD_SIZE_ORDER);
 }
 # else
 static struct kmem_cache *thread_stack_cache;
@@ -181,9 +201,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
 	return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
 }
 
-static void free_thread_stack(unsigned long *stack)
+static void free_thread_stack(struct task_struct *tsk)
 {
-	kmem_cache_free(thread_stack_cache, stack);
+	kmem_cache_free(thread_stack_cache, tsk->stack);
 }
 
 void thread_stack_cache_init(void)
@@ -213,24 +233,47 @@ struct kmem_cache *vm_area_cachep;
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
-static void account_kernel_stack(unsigned long *stack, int account)
+static void account_kernel_stack(struct task_struct *tsk, int account)
 {
-	/* All stack pages are in the same zone and belong to the same memcg. */
-	struct page *first_page = virt_to_page(stack);
+	void *stack = task_stack_page(tsk);
+	struct vm_struct *vm = task_stack_vm_area(tsk);
+
+	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);
+
+	if (vm) {
+		int i;
+
+		BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
+
+		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
+			mod_zone_page_state(page_zone(vm->pages[i]),
+					    NR_KERNEL_STACK_KB,
+					    PAGE_SIZE / 1024 * account);
+		}
 
-	mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
-			    THREAD_SIZE / 1024 * account);
+		/* All stack pages belong to the same memcg. */
+		memcg_kmem_update_page_stat(vm->pages[0], MEMCG_KERNEL_STACK_KB,
+					    account * (THREAD_SIZE / 1024));
+	} else {
+		/*
+		 * All stack pages are in the same zone and belong to the
+		 * same memcg.
+		 */
+		struct page *first_page = virt_to_page(stack);
+
+		mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
+				    THREAD_SIZE / 1024 * account);
 
-	memcg_kmem_update_page_stat(
-		first_page, MEMCG_KERNEL_STACK_KB,
-		account * (THREAD_SIZE / 1024));
+		memcg_kmem_update_page_stat(first_page, MEMCG_KERNEL_STACK_KB,
+					    account * (THREAD_SIZE / 1024));
+	}
 }
 
 void free_task(struct task_struct *tsk)
 {
-	account_kernel_stack(tsk->stack, -1);
+	account_kernel_stack(tsk, -1);
 	arch_release_thread_stack(tsk->stack);
-	free_thread_stack(tsk->stack);
+	free_thread_stack(tsk);
 	rt_mutex_debug_task_free(tsk);
 	ftrace_graph_exit_task(tsk);
 	put_seccomp_filter(tsk);
@@ -342,6 +385,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 {
 	struct task_struct *tsk;
 	unsigned long *stack;
+	struct vm_struct *stack_vm_area;
 	int err;
 
 	if (node == NUMA_NO_NODE)
@@ -354,11 +398,23 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	if (!stack)
 		goto free_tsk;
 
+	stack_vm_area = task_stack_vm_area(tsk);
+
 	err = arch_dup_task_struct(tsk, orig);
+
+	/*
+	 * arch_dup_task_struct clobbers the stack-related fields.  Make
+	 * sure they're properly initialized before using any stack-related
+	 * functions again.
+	 */
+	tsk->stack = stack;
+#ifdef CONFIG_VMAP_STACK
+	tsk->stack_vm_area = stack_vm_area;
+#endif
+
 	if (err)
 		goto free_stack;
 
-	tsk->stack = stack;
 #ifdef CONFIG_SECCOMP
 	/*
 	 * We must handle setting up seccomp filters once we're under
@@ -390,14 +446,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->task_frag.page = NULL;
 	tsk->wake_q.next = NULL;
 
-	account_kernel_stack(stack, 1);
+	account_kernel_stack(tsk, 1);
 
 	kcov_task_init(tsk);
 
 	return tsk;
 
 free_stack:
-	free_thread_stack(stack);
+	free_thread_stack(tsk);
 free_tsk:
 	free_task_struct(tsk);
 	return NULL;
-- 
2.7.4
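
For completeness, a hypothetical way to observe the difference (the
function below is made up for illustration and is not part of this
series): with CONFIG_VMAP_STACK=y, runaway recursion faults cleanly in
the guard page instead of first scribbling over whatever sits below the
stack.

	/* Illustrative only: recurse until the kernel stack overflows. */
	static noinline void stack_overflow_demo(void)
	{
		volatile char buf[256];

		buf[0] = 1;		/* force a real stack frame */
		stack_overflow_demo();
		buf[0] = 2;		/* defeat tail-call optimization */
	}

With a vmalloc-backed stack, the first access past the stack's low end
hits an unmapped guard page and faults immediately; later patches in
this series improve how x86 reports that case.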
