* [PATCH 0/2] scs: switch to vmapped shadow stacks
@ 2020-10-22 20:23 Sami Tolvanen
  2020-10-22 20:23 ` [PATCH 1/2] " Sami Tolvanen
                   ` (2 more replies)
  0 siblings, 3 replies; 11+ messages in thread
From: Sami Tolvanen @ 2020-10-22 20:23 UTC (permalink / raw)
  To: Will Deacon, Catalin Marinas
  Cc: Mark Rutland, James Morse, Ard Biesheuvel, Kees Cook,
	linux-arm-kernel, linux-kernel, Sami Tolvanen

As discussed a few months ago [1][2], virtually mapped shadow call stacks
are better for safety and robustness. This series dusts off the VMAP
option from the original SCS patch series and switches the kernel to use
virtually mapped shadow stacks unconditionally when SCS is enabled.

 [1] https://lore.kernel.org/lkml/20200515172355.GD23334@willie-the-truck/
 [2] https://lore.kernel.org/lkml/20200427220942.GB80713@google.com/


Sami Tolvanen (2):
  scs: switch to vmapped shadow stacks
  arm64: scs: use vmapped IRQ and SDEI shadow stacks

 arch/arm64/include/asm/scs.h | 21 ++++++++++-
 arch/arm64/kernel/entry.S    |  6 ++--
 arch/arm64/kernel/irq.c      |  2 ++
 arch/arm64/kernel/scs.c      | 67 +++++++++++++++++++++++++++++++++---
 arch/arm64/kernel/sdei.c     |  7 ++++
 include/linux/scs.h          | 15 +++-----
 kernel/scs.c                 | 67 ++++++++++++++++++++++++++++++------
 7 files changed, 156 insertions(+), 29 deletions(-)


base-commit: 96485e4462604744d66bf4301557d996d80b85eb
-- 
2.29.0.rc1.297.gfa9743e501-goog



* [PATCH 1/2] scs: switch to vmapped shadow stacks
  2020-10-22 20:23 [PATCH 0/2] scs: switch to vmapped shadow stacks Sami Tolvanen
@ 2020-10-22 20:23 ` Sami Tolvanen
  2020-10-22 22:38   ` Kees Cook
  2020-11-19 13:00   ` Will Deacon
  2020-10-22 20:23 ` [PATCH 2/2] arm64: scs: use vmapped IRQ and SDEI " Sami Tolvanen
  2020-11-17 17:35 ` [PATCH 0/2] scs: switch to vmapped " Catalin Marinas
  2 siblings, 2 replies; 11+ messages in thread
From: Sami Tolvanen @ 2020-10-22 20:23 UTC (permalink / raw)
  To: Will Deacon, Catalin Marinas
  Cc: Mark Rutland, James Morse, Ard Biesheuvel, Kees Cook,
	linux-arm-kernel, linux-kernel, Sami Tolvanen

The kernel currently uses kmem_cache to allocate shadow call stacks,
which means an overflow may not be detected immediately and can
potentially result in another task's shadow stack being overwritten.

This change switches SCS to use virtually mapped shadow stacks,
which increases the shadow stack size to a full page and provides
more robust overflow detection, similar to VMAP_STACK.

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
---
 include/linux/scs.h |  7 +----
 kernel/scs.c        | 63 ++++++++++++++++++++++++++++++++++++++-------
 2 files changed, 55 insertions(+), 15 deletions(-)

diff --git a/include/linux/scs.h b/include/linux/scs.h
index 6dec390cf154..86e3c4b7b714 100644
--- a/include/linux/scs.h
+++ b/include/linux/scs.h
@@ -15,12 +15,7 @@
 
 #ifdef CONFIG_SHADOW_CALL_STACK
 
-/*
- * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit
- * architecture) provided ~40% safety margin on stack usage while keeping
- * memory allocation overhead reasonable.
- */
-#define SCS_SIZE		SZ_1K
+#define SCS_SIZE		PAGE_SIZE
 #define GFP_SCS			(GFP_KERNEL | __GFP_ZERO)
 
 /* An illegal pointer value to mark the end of the shadow stack. */
diff --git a/kernel/scs.c b/kernel/scs.c
index 4ff4a7ba0094..2136edba548d 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -5,50 +5,95 @@
  * Copyright (C) 2019 Google LLC
  */
 
+#include <linux/cpuhotplug.h>
 #include <linux/kasan.h>
 #include <linux/mm.h>
 #include <linux/scs.h>
-#include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/vmstat.h>
 
-static struct kmem_cache *scs_cache;
-
 static void __scs_account(void *s, int account)
 {
-	struct page *scs_page = virt_to_page(s);
+	struct page *scs_page = vmalloc_to_page(s);
 
 	mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
 			    account * (SCS_SIZE / SZ_1K));
 }
 
+/* Matches NR_CACHED_STACKS for VMAP_STACK */
+#define NR_CACHED_SCS 2
+static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);
+
 static void *scs_alloc(int node)
 {
-	void *s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
+	int i;
+	void *s;
+
+	for (i = 0; i < NR_CACHED_SCS; i++) {
+		s = this_cpu_xchg(scs_cache[i], NULL);
+		if (s) {
+			memset(s, 0, SCS_SIZE);
+			goto out;
+		}
+	}
+
+	/*
+	 * We allocate a full page for the shadow stack, which should be
+	 * more than we need. Check the assumption nevertheless.
+	 */
+	BUILD_BUG_ON(SCS_SIZE > PAGE_SIZE);
+
+	s = __vmalloc_node_range(PAGE_SIZE, SCS_SIZE,
+				 VMALLOC_START, VMALLOC_END,
+				 GFP_SCS, PAGE_KERNEL, 0,
+				 node, __builtin_return_address(0));
 
 	if (!s)
 		return NULL;
 
+out:
 	*__scs_magic(s) = SCS_END_MAGIC;
 
 	/*
 	 * Poison the allocation to catch unintentional accesses to
 	 * the shadow stack when KASAN is enabled.
 	 */
-	kasan_poison_object_data(scs_cache, s);
+	kasan_poison_vmalloc(s, SCS_SIZE);
 	__scs_account(s, 1);
 	return s;
 }
 
 static void scs_free(void *s)
 {
+	int i;
+
 	__scs_account(s, -1);
-	kasan_unpoison_object_data(scs_cache, s);
-	kmem_cache_free(scs_cache, s);
+	kasan_unpoison_vmalloc(s, SCS_SIZE);
+
+	for (i = 0; i < NR_CACHED_SCS; i++)
+		if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL)
+			return;
+
+	vfree_atomic(s);
+}
+
+static int scs_cleanup(unsigned int cpu)
+{
+	int i;
+	void **cache = per_cpu_ptr(scs_cache, cpu);
+
+	for (i = 0; i < NR_CACHED_SCS; i++) {
+		vfree(cache[i]);
+		cache[i] = NULL;
+	}
+
+	return 0;
 }
 
 void __init scs_init(void)
 {
-	scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
+	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "scs:scs_cache", NULL,
+			  scs_cleanup);
 }
 
 int scs_prepare(struct task_struct *tsk, int node)
-- 
2.29.0.rc1.297.gfa9743e501-goog



* [PATCH 2/2] arm64: scs: use vmapped IRQ and SDEI shadow stacks
  2020-10-22 20:23 [PATCH 0/2] scs: switch to vmapped shadow stacks Sami Tolvanen
  2020-10-22 20:23 ` [PATCH 1/2] " Sami Tolvanen
@ 2020-10-22 20:23 ` Sami Tolvanen
  2020-10-22 22:38   ` Kees Cook
  2020-11-19 13:11   ` Will Deacon
  2020-11-17 17:35 ` [PATCH 0/2] scs: switch to vmapped " Catalin Marinas
  2 siblings, 2 replies; 11+ messages in thread
From: Sami Tolvanen @ 2020-10-22 20:23 UTC (permalink / raw)
  To: Will Deacon, Catalin Marinas
  Cc: Mark Rutland, James Morse, Ard Biesheuvel, Kees Cook,
	linux-arm-kernel, linux-kernel, Sami Tolvanen

Use scs_alloc() to allocate the IRQ and SDEI shadow stacks as well,
instead of using statically allocated stacks.

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
---
 arch/arm64/include/asm/scs.h | 21 ++++++++++-
 arch/arm64/kernel/entry.S    |  6 ++--
 arch/arm64/kernel/irq.c      |  2 ++
 arch/arm64/kernel/scs.c      | 67 +++++++++++++++++++++++++++++++++---
 arch/arm64/kernel/sdei.c     |  7 ++++
 include/linux/scs.h          |  8 ++---
 kernel/scs.c                 |  4 +--
 7 files changed, 101 insertions(+), 14 deletions(-)

diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index eaa2cd92e4c1..e9d2c3e67ff9 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -24,6 +24,25 @@
 	.endm
 #endif /* CONFIG_SHADOW_CALL_STACK */
 
-#endif /* __ASSEMBLY __ */
+#else /* __ASSEMBLY__ */
+
+#include <linux/scs.h>
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+
+extern void scs_init_irq(void);
+
+extern void scs_free_sdei(void);
+extern int scs_init_sdei(void);
+
+#else
+
+static inline void scs_init_irq(void) {}
+static inline void scs_free_sdei(void) {}
+static inline int scs_init_sdei(void) { return -EOPNOTSUPP; }
+
+#endif
+
+#endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_SCS_H */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f30007dff35f..0f76fe8142e4 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -438,7 +438,7 @@ SYM_CODE_END(__swpan_exit_el0)
 
 #ifdef CONFIG_SHADOW_CALL_STACK
 	/* also switch to the irq shadow stack */
-	adr_this_cpu scs_sp, irq_shadow_call_stack, x26
+	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
 #endif
 
 9998:
@@ -1094,9 +1094,9 @@ SYM_CODE_START(__sdei_asm_handler)
 #ifdef CONFIG_SHADOW_CALL_STACK
 	/* Use a separate shadow call stack for normal and critical events */
 	cbnz	w4, 3f
-	adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal, tmp=x6
+	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
 	b	4f
-3:	adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical, tmp=x6
+3:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
 4:
 #endif
 
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 9cf2fb87584a..54ba3725bc0e 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -20,6 +20,7 @@
 #include <linux/seq_file.h>
 #include <linux/vmalloc.h>
 #include <asm/daifflags.h>
+#include <asm/scs.h>
 #include <asm/vmap_stack.h>
 
 /* Only access this in an NMI enter/exit */
@@ -54,6 +55,7 @@ static void init_irq_stacks(void)
 void __init init_IRQ(void)
 {
 	init_irq_stacks();
+	scs_init_irq();
 	irqchip_init();
 	if (!handle_arch_irq)
 		panic("No interrupt controller found.");
diff --git a/arch/arm64/kernel/scs.c b/arch/arm64/kernel/scs.c
index e8f7ff45dd8f..f85cebf8122a 100644
--- a/arch/arm64/kernel/scs.c
+++ b/arch/arm64/kernel/scs.c
@@ -6,11 +6,70 @@
  */
 
 #include <linux/percpu.h>
-#include <linux/scs.h>
+#include <asm/scs.h>
 
-DEFINE_SCS(irq_shadow_call_stack);
+DEFINE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
+
+DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
+DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
 
 #ifdef CONFIG_ARM_SDE_INTERFACE
-DEFINE_SCS(sdei_shadow_call_stack_normal);
-DEFINE_SCS(sdei_shadow_call_stack_critical);
+DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
+DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
 #endif
+
+void scs_init_irq(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		per_cpu(irq_shadow_call_stack_ptr, cpu) =
+			scs_alloc(cpu_to_node(cpu));
+}
+
+
+void scs_free_sdei(void)
+{
+	int cpu;
+	void *s;
+
+	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
+		return;
+
+	for_each_possible_cpu(cpu) {
+		s = per_cpu(sdei_shadow_call_stack_normal_ptr, cpu);
+		if (s)
+			scs_free(s);
+
+		s = per_cpu(sdei_shadow_call_stack_critical_ptr, cpu);
+		if (s)
+			scs_free(s);
+	}
+}
+
+int scs_init_sdei(void)
+{
+	int cpu;
+	void *s;
+
+	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
+		return 0;
+
+	for_each_possible_cpu(cpu) {
+		s = scs_alloc(cpu_to_node(cpu));
+		if (!s)
+			goto err;
+		per_cpu(sdei_shadow_call_stack_normal_ptr, cpu) = s;
+
+		s = scs_alloc(cpu_to_node(cpu));
+		if (!s)
+			goto err;
+		per_cpu(sdei_shadow_call_stack_critical_ptr, cpu) = s;
+	}
+
+	return 0;
+
+err:
+	scs_free_sdei();
+	return -ENOMEM;
+}
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 7689f2031c0c..04519a7cb51d 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -13,6 +13,7 @@
 #include <asm/kprobes.h>
 #include <asm/mmu.h>
 #include <asm/ptrace.h>
+#include <asm/scs.h>
 #include <asm/sections.h>
 #include <asm/stacktrace.h>
 #include <asm/sysreg.h>
@@ -138,6 +139,12 @@ unsigned long sdei_arch_get_entry_point(int conduit)
 			return 0;
 	}
 
+	if (scs_init_sdei()) {
+		if (IS_ENABLED(CONFIG_VMAP_STACK))
+			free_sdei_stacks();
+		return 0;
+	}
+
 	sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
diff --git a/include/linux/scs.h b/include/linux/scs.h
index 86e3c4b7b714..6b35a83576d4 100644
--- a/include/linux/scs.h
+++ b/include/linux/scs.h
@@ -21,13 +21,11 @@
 /* An illegal pointer value to mark the end of the shadow stack. */
 #define SCS_END_MAGIC		(0x5f6UL + POISON_POINTER_DELTA)
 
-/* Allocate a static per-CPU shadow stack */
-#define DEFINE_SCS(name)						\
-	DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name)	\
-
 #define task_scs(tsk)		(task_thread_info(tsk)->scs_base)
 #define task_scs_sp(tsk)	(task_thread_info(tsk)->scs_sp)
 
+void *scs_alloc(int node);
+void scs_free(void *s);
 void scs_init(void);
 int scs_prepare(struct task_struct *tsk, int node);
 void scs_release(struct task_struct *tsk);
@@ -56,6 +54,8 @@ static inline bool task_scs_end_corrupted(struct task_struct *tsk)
 
 #else /* CONFIG_SHADOW_CALL_STACK */
 
+static inline void *scs_alloc(int node) { return NULL; }
+static inline void scs_free(void *s) {}
 static inline void scs_init(void) {}
 static inline void scs_task_reset(struct task_struct *tsk) {}
 static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
diff --git a/kernel/scs.c b/kernel/scs.c
index 2136edba548d..8df4a92cd939 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -24,7 +24,7 @@ static void __scs_account(void *s, int account)
 #define NR_CACHED_SCS 2
 static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);
 
-static void *scs_alloc(int node)
+void *scs_alloc(int node)
 {
 	int i;
 	void *s;
@@ -63,7 +63,7 @@ static void *scs_alloc(int node)
 	return s;
 }
 
-static void scs_free(void *s)
+void scs_free(void *s)
 {
 	int i;
 
-- 
2.29.0.rc1.297.gfa9743e501-goog



* Re: [PATCH 1/2] scs: switch to vmapped shadow stacks
  2020-10-22 20:23 ` [PATCH 1/2] " Sami Tolvanen
@ 2020-10-22 22:38   ` Kees Cook
  2020-11-19 13:00   ` Will Deacon
  1 sibling, 0 replies; 11+ messages in thread
From: Kees Cook @ 2020-10-22 22:38 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Will Deacon, Catalin Marinas, Mark Rutland, James Morse,
	Ard Biesheuvel, linux-arm-kernel, linux-kernel

On Thu, Oct 22, 2020 at 01:23:54PM -0700, Sami Tolvanen wrote:
> The kernel currently uses kmem_cache to allocate shadow call stacks,
> which means an overflow may not be immediately detected and can
> potentially result in another task's shadow stack to be overwritten.
> 
> This change switches SCS to use virtually mapped shadow stacks,
> which increases shadow stack size to a full page and provides more
> robust overflow detection similarly to VMAP_STACK.
> 
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>

Thanks! I much prefer this to kmem. :)

Reviewed-by: Kees Cook <keescook@chromium.org>

-- 
Kees Cook


* Re: [PATCH 2/2] arm64: scs: use vmapped IRQ and SDEI shadow stacks
  2020-10-22 20:23 ` [PATCH 2/2] arm64: scs: use vmapped IRQ and SDEI " Sami Tolvanen
@ 2020-10-22 22:38   ` Kees Cook
  2020-11-19 13:11   ` Will Deacon
  1 sibling, 0 replies; 11+ messages in thread
From: Kees Cook @ 2020-10-22 22:38 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Will Deacon, Catalin Marinas, Mark Rutland, James Morse,
	Ard Biesheuvel, linux-arm-kernel, linux-kernel

On Thu, Oct 22, 2020 at 01:23:55PM -0700, Sami Tolvanen wrote:
> Use scs_alloc() to allocate also IRQ and SDEI shadow stacks instead of
> using statically allocated stacks.
> 
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>

Reviewed-by: Kees Cook <keescook@chromium.org>

-- 
Kees Cook


* Re: [PATCH 0/2] scs: switch to vmapped shadow stacks
  2020-10-22 20:23 [PATCH 0/2] scs: switch to vmapped shadow stacks Sami Tolvanen
  2020-10-22 20:23 ` [PATCH 1/2] " Sami Tolvanen
  2020-10-22 20:23 ` [PATCH 2/2] arm64: scs: use vmapped IRQ and SDEI " Sami Tolvanen
@ 2020-11-17 17:35 ` Catalin Marinas
  2020-11-18  9:27   ` Will Deacon
  2 siblings, 1 reply; 11+ messages in thread
From: Catalin Marinas @ 2020-11-17 17:35 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Will Deacon, Mark Rutland, James Morse, Ard Biesheuvel,
	Kees Cook, linux-arm-kernel, linux-kernel

On Thu, Oct 22, 2020 at 01:23:53PM -0700, Sami Tolvanen wrote:
> As discussed a few months ago [1][2], virtually mapped shadow call stacks
> are better for safety and robustness. This series dusts off the VMAP
> option from the original SCS patch series and switches the kernel to use
> virtually mapped shadow stacks unconditionally when SCS is enabled.
> 
>  [1] https://lore.kernel.org/lkml/20200515172355.GD23334@willie-the-truck/
>  [2] https://lore.kernel.org/lkml/20200427220942.GB80713@google.com/
> 
> 
> Sami Tolvanen (2):
>   scs: switch to vmapped shadow stacks
>   arm64: scs: use vmapped IRQ and SDEI shadow stacks

Will, Mark. Any objections to this series? If not, I can queue it for
5.11 via the arm64 tree.

Thanks.

-- 
Catalin


* Re: [PATCH 0/2] scs: switch to vmapped shadow stacks
  2020-11-17 17:35 ` [PATCH 0/2] scs: switch to vmapped " Catalin Marinas
@ 2020-11-18  9:27   ` Will Deacon
  0 siblings, 0 replies; 11+ messages in thread
From: Will Deacon @ 2020-11-18  9:27 UTC (permalink / raw)
  To: Catalin Marinas
  Cc: Sami Tolvanen, Mark Rutland, James Morse, Ard Biesheuvel,
	Kees Cook, linux-arm-kernel, linux-kernel

On Tue, Nov 17, 2020 at 05:35:24PM +0000, Catalin Marinas wrote:
> On Thu, Oct 22, 2020 at 01:23:53PM -0700, Sami Tolvanen wrote:
> > As discussed a few months ago [1][2], virtually mapped shadow call stacks
> > are better for safety and robustness. This series dusts off the VMAP
> > option from the original SCS patch series and switches the kernel to use
> > virtually mapped shadow stacks unconditionally when SCS is enabled.
> > 
> >  [1] https://lore.kernel.org/lkml/20200515172355.GD23334@willie-the-truck/
> >  [2] https://lore.kernel.org/lkml/20200427220942.GB80713@google.com/
> > 
> > 
> > Sami Tolvanen (2):
> >   scs: switch to vmapped shadow stacks
> >   arm64: scs: use vmapped IRQ and SDEI shadow stacks
> 
> Will, Mark. Any objections to this series? If not, I can queue it for
> 5.11 via the arm64 tree.

It's on my list to review, but I have a tonne of other things I have to get
to first.

Will


* Re: [PATCH 1/2] scs: switch to vmapped shadow stacks
  2020-10-22 20:23 ` [PATCH 1/2] " Sami Tolvanen
  2020-10-22 22:38   ` Kees Cook
@ 2020-11-19 13:00   ` Will Deacon
  2020-11-20 17:00     ` Sami Tolvanen
  1 sibling, 1 reply; 11+ messages in thread
From: Will Deacon @ 2020-11-19 13:00 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Catalin Marinas, Mark Rutland, James Morse, Ard Biesheuvel,
	Kees Cook, linux-arm-kernel, linux-kernel

Hi Sami,

On Thu, Oct 22, 2020 at 01:23:54PM -0700, Sami Tolvanen wrote:
> The kernel currently uses kmem_cache to allocate shadow call stacks,
> which means an overflow may not be immediately detected and can
> potentially result in another task's shadow stack to be overwritten.
> 
> This change switches SCS to use virtually mapped shadow stacks,
> which increases shadow stack size to a full page and provides more
> robust overflow detection similarly to VMAP_STACK.
> 
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> ---
>  include/linux/scs.h |  7 +----
>  kernel/scs.c        | 63 ++++++++++++++++++++++++++++++++++++++-------
>  2 files changed, 55 insertions(+), 15 deletions(-)

Cheers for posting this. I _much_ prefer handling the SCS this way, but I
have some comments on the implementation below.

> diff --git a/include/linux/scs.h b/include/linux/scs.h
> index 6dec390cf154..86e3c4b7b714 100644
> --- a/include/linux/scs.h
> +++ b/include/linux/scs.h
> @@ -15,12 +15,7 @@
>  
>  #ifdef CONFIG_SHADOW_CALL_STACK
>  
> -/*
> - * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit
> - * architecture) provided ~40% safety margin on stack usage while keeping
> - * memory allocation overhead reasonable.
> - */
> -#define SCS_SIZE		SZ_1K
> +#define SCS_SIZE		PAGE_SIZE

We could make this SCS_ORDER and then forget about alignment etc.

>  #define GFP_SCS			(GFP_KERNEL | __GFP_ZERO)
>  
>  /* An illegal pointer value to mark the end of the shadow stack. */
> diff --git a/kernel/scs.c b/kernel/scs.c
> index 4ff4a7ba0094..2136edba548d 100644
> --- a/kernel/scs.c
> +++ b/kernel/scs.c
> @@ -5,50 +5,95 @@
>   * Copyright (C) 2019 Google LLC
>   */
>  
> +#include <linux/cpuhotplug.h>
>  #include <linux/kasan.h>
>  #include <linux/mm.h>
>  #include <linux/scs.h>
> -#include <linux/slab.h>
> +#include <linux/vmalloc.h>
>  #include <linux/vmstat.h>
>  
> -static struct kmem_cache *scs_cache;
> -
>  static void __scs_account(void *s, int account)
>  {
> -	struct page *scs_page = virt_to_page(s);
> +	struct page *scs_page = vmalloc_to_page(s);
>  
>  	mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
>  			    account * (SCS_SIZE / SZ_1K));
>  }
>  
> +/* Matches NR_CACHED_STACKS for VMAP_STACK */
> +#define NR_CACHED_SCS 2
> +static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);
> +
>  static void *scs_alloc(int node)
>  {
> -	void *s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
> +	int i;
> +	void *s;
> +
> +	for (i = 0; i < NR_CACHED_SCS; i++) {
> +		s = this_cpu_xchg(scs_cache[i], NULL);
> +		if (s) {
> +			memset(s, 0, SCS_SIZE);
> +			goto out;
> +		}
> +	}
> +
> +	/*
> +	 * We allocate a full page for the shadow stack, which should be
> +	 * more than we need. Check the assumption nevertheless.
> +	 */
> +	BUILD_BUG_ON(SCS_SIZE > PAGE_SIZE);

With SCS_ORDER, you can drop this.

> +
> +	s = __vmalloc_node_range(PAGE_SIZE, SCS_SIZE,
> +				 VMALLOC_START, VMALLOC_END,
> +				 GFP_SCS, PAGE_KERNEL, 0,
> +				 node, __builtin_return_address(0));

Do we actually need vmalloc here? If we used alloc_pages() + vmap()
instead, then we could avoid the expensive call to vmalloc_to_page()
in __scs_account().
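
For a single page, I was thinking of something along these lines in
scs_alloc(), instead of the __vmalloc_node_range() call (completely
untested):

	struct page *page = alloc_pages_node(node, GFP_SCS, 0);

	if (!page)
		return NULL;

	s = vmap(&page, 1, VM_MAP, PAGE_KERNEL);
	if (!s) {
		__free_page(page);
		return NULL;
	}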

>  
>  	if (!s)
>  		return NULL;
>  
> +out:
>  	*__scs_magic(s) = SCS_END_MAGIC;
>  
>  	/*
>  	 * Poison the allocation to catch unintentional accesses to
>  	 * the shadow stack when KASAN is enabled.
>  	 */
> -	kasan_poison_object_data(scs_cache, s);
> +	kasan_poison_vmalloc(s, SCS_SIZE);
>  	__scs_account(s, 1);
>  	return s;
>  }
>  
>  static void scs_free(void *s)
>  {
> +	int i;
> +
>  	__scs_account(s, -1);
> -	kasan_unpoison_object_data(scs_cache, s);
> -	kmem_cache_free(scs_cache, s);
> +	kasan_unpoison_vmalloc(s, SCS_SIZE);

I don't see the point in unpoisoning here tbh; vfree_atomic() re-poisons
almost immediately, so we should probably defer this to scs_alloc() and
only when picking the stack out of the cache.
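
i.e. leave the stack poisoned in scs_free() and only unpoison it on the
cache-hit path in scs_alloc(), something like (untested):

	for (i = 0; i < NR_CACHED_SCS; i++) {
		s = this_cpu_xchg(scs_cache[i], NULL);
		if (s) {
			/* Cached stacks were left poisoned by scs_free() */
			kasan_unpoison_vmalloc(s, SCS_SIZE);
			memset(s, 0, SCS_SIZE);
			goto out;
		}
	}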

> +
> +	for (i = 0; i < NR_CACHED_SCS; i++)

Can you add a comment about the re-entrancy here and why we're using
this_cpu_cmpxchg() please?
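
Something along the lines of:

	/*
	 * We cannot sleep here, as scs_free() can be called from interrupt
	 * context, so update the cache with this_cpu_cmpxchg() and fall back
	 * to vfree_atomic() if all the slots are already taken.
	 */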

Thanks,

Will


* Re: [PATCH 2/2] arm64: scs: use vmapped IRQ and SDEI shadow stacks
  2020-10-22 20:23 ` [PATCH 2/2] arm64: scs: use vmapped IRQ and SDEI " Sami Tolvanen
  2020-10-22 22:38   ` Kees Cook
@ 2020-11-19 13:11   ` Will Deacon
  1 sibling, 0 replies; 11+ messages in thread
From: Will Deacon @ 2020-11-19 13:11 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Catalin Marinas, Mark Rutland, James Morse, Ard Biesheuvel,
	Kees Cook, linux-arm-kernel, linux-kernel

On Thu, Oct 22, 2020 at 01:23:55PM -0700, Sami Tolvanen wrote:
> Use scs_alloc() to allocate also IRQ and SDEI shadow stacks instead of
> using statically allocated stacks.
> 
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> ---
>  arch/arm64/include/asm/scs.h | 21 ++++++++++-
>  arch/arm64/kernel/entry.S    |  6 ++--
>  arch/arm64/kernel/irq.c      |  2 ++
>  arch/arm64/kernel/scs.c      | 67 +++++++++++++++++++++++++++++++++---
>  arch/arm64/kernel/sdei.c     |  7 ++++
>  include/linux/scs.h          |  8 ++---
>  kernel/scs.c                 |  4 +--
>  7 files changed, 101 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
> index eaa2cd92e4c1..e9d2c3e67ff9 100644
> --- a/arch/arm64/include/asm/scs.h
> +++ b/arch/arm64/include/asm/scs.h
> @@ -24,6 +24,25 @@
>  	.endm
>  #endif /* CONFIG_SHADOW_CALL_STACK */
>  
> -#endif /* __ASSEMBLY __ */
> +#else /* __ASSEMBLY__ */
> +
> +#include <linux/scs.h>
> +
> +#ifdef CONFIG_SHADOW_CALL_STACK
> +
> +extern void scs_init_irq(void);
> +
> +extern void scs_free_sdei(void);

This is only called on the scs_init_sdei() failure path, so it can be
static. But see below, because I think we should move all of these functions
out of scs.c anyway.

> diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
> index 9cf2fb87584a..54ba3725bc0e 100644
> --- a/arch/arm64/kernel/irq.c
> +++ b/arch/arm64/kernel/irq.c
> @@ -20,6 +20,7 @@
>  #include <linux/seq_file.h>
>  #include <linux/vmalloc.h>
>  #include <asm/daifflags.h>
> +#include <asm/scs.h>
>  #include <asm/vmap_stack.h>
>  
>  /* Only access this in an NMI enter/exit */
> @@ -54,6 +55,7 @@ static void init_irq_stacks(void)
>  void __init init_IRQ(void)
>  {
>  	init_irq_stacks();
> +	scs_init_irq();

If we internalise this in init_irq_stacks()...

> diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
> index 7689f2031c0c..04519a7cb51d 100644
> --- a/arch/arm64/kernel/sdei.c
> +++ b/arch/arm64/kernel/sdei.c
> @@ -13,6 +13,7 @@
>  #include <asm/kprobes.h>
>  #include <asm/mmu.h>
>  #include <asm/ptrace.h>
> +#include <asm/scs.h>
>  #include <asm/sections.h>
>  #include <asm/stacktrace.h>
>  #include <asm/sysreg.h>
> @@ -138,6 +139,12 @@ unsigned long sdei_arch_get_entry_point(int conduit)
>  			return 0;
>  	}
>  
> +	if (scs_init_sdei()) {

... and this in init_sdei_stacks(), then I think we remove all of the code
from arch/arm64/kernel/scs.c.
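
For the IRQ case, I'm imagining something roughly like this in irq.c
(untested sketch of the VMAP_STACK variant, with the per-cpu pointer
definition moved over as well):

static void init_irq_stacks(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(irq_stack_ptr, cpu) =
			arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
#ifdef CONFIG_SHADOW_CALL_STACK
		per_cpu(irq_shadow_call_stack_ptr, cpu) =
			scs_alloc(cpu_to_node(cpu));
#endif
	}
}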

> diff --git a/include/linux/scs.h b/include/linux/scs.h
> index 86e3c4b7b714..6b35a83576d4 100644
> --- a/include/linux/scs.h
> +++ b/include/linux/scs.h
> @@ -21,13 +21,11 @@
>  /* An illegal pointer value to mark the end of the shadow stack. */
>  #define SCS_END_MAGIC		(0x5f6UL + POISON_POINTER_DELTA)
>  
> -/* Allocate a static per-CPU shadow stack */
> -#define DEFINE_SCS(name)						\
> -	DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name)	\
> -
>  #define task_scs(tsk)		(task_thread_info(tsk)->scs_base)
>  #define task_scs_sp(tsk)	(task_thread_info(tsk)->scs_sp)
>  
> +void *scs_alloc(int node);
> +void scs_free(void *s);
>  void scs_init(void);
>  int scs_prepare(struct task_struct *tsk, int node);
>  void scs_release(struct task_struct *tsk);
> @@ -56,6 +54,8 @@ static inline bool task_scs_end_corrupted(struct task_struct *tsk)
>  
>  #else /* CONFIG_SHADOW_CALL_STACK */
>  
> +static inline void *scs_alloc(int node) { return NULL; }
> +static inline void scs_free(void *s) {}
>  static inline void scs_init(void) {}
>  static inline void scs_task_reset(struct task_struct *tsk) {}
>  static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
> diff --git a/kernel/scs.c b/kernel/scs.c
> index 2136edba548d..8df4a92cd939 100644
> --- a/kernel/scs.c
> +++ b/kernel/scs.c
> @@ -24,7 +24,7 @@ static void __scs_account(void *s, int account)
>  #define NR_CACHED_SCS 2
>  static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);
>  
> -static void *scs_alloc(int node)
> +void *scs_alloc(int node)
>  {
>  	int i;
>  	void *s;
> @@ -63,7 +63,7 @@ static void *scs_alloc(int node)
>  	return s;
>  }
>  
> -static void scs_free(void *s)
> +void scs_free(void *s)

Should be part of the first patch?

Will


* Re: [PATCH 1/2] scs: switch to vmapped shadow stacks
  2020-11-19 13:00   ` Will Deacon
@ 2020-11-20 17:00     ` Sami Tolvanen
  2020-11-23 11:08       ` Will Deacon
  0 siblings, 1 reply; 11+ messages in thread
From: Sami Tolvanen @ 2020-11-20 17:00 UTC (permalink / raw)
  To: Will Deacon
  Cc: Catalin Marinas, Mark Rutland, James Morse, Ard Biesheuvel,
	Kees Cook, linux-arm-kernel, LKML

On Thu, Nov 19, 2020 at 5:00 AM Will Deacon <will@kernel.org> wrote:
>
> Hi Sami,
>
> On Thu, Oct 22, 2020 at 01:23:54PM -0700, Sami Tolvanen wrote:
> > The kernel currently uses kmem_cache to allocate shadow call stacks,
> > which means an overflow may not be immediately detected and can
> > potentially result in another task's shadow stack to be overwritten.
> >
> > This change switches SCS to use virtually mapped shadow stacks,
> > which increases shadow stack size to a full page and provides more
> > robust overflow detection similarly to VMAP_STACK.
> >
> > Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> > ---
> >  include/linux/scs.h |  7 +----
> >  kernel/scs.c        | 63 ++++++++++++++++++++++++++++++++++++++-------
> >  2 files changed, 55 insertions(+), 15 deletions(-)
>
> Cheers for posting this. I _much_ prefer handling the SCS this way, but I
> have some comments on the implementation below.
>
> > diff --git a/include/linux/scs.h b/include/linux/scs.h
> > index 6dec390cf154..86e3c4b7b714 100644
> > --- a/include/linux/scs.h
> > +++ b/include/linux/scs.h
> > @@ -15,12 +15,7 @@
> >
> >  #ifdef CONFIG_SHADOW_CALL_STACK
> >
> > -/*
> > - * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit
> > - * architecture) provided ~40% safety margin on stack usage while keeping
> > - * memory allocation overhead reasonable.
> > - */
> > -#define SCS_SIZE             SZ_1K
> > +#define SCS_SIZE             PAGE_SIZE
>
> We could make this SCS_ORDER and then forget about alignment etc.

It's still convenient to have SCS_SIZE defined, I think. I can
certainly define SCS_ORDER and use that to define SCS_SIZE, but do you
think we'll need an order >0 here at some point in future?

> >  #define GFP_SCS                      (GFP_KERNEL | __GFP_ZERO)
> >
> >  /* An illegal pointer value to mark the end of the shadow stack. */
> > diff --git a/kernel/scs.c b/kernel/scs.c
> > index 4ff4a7ba0094..2136edba548d 100644
> > --- a/kernel/scs.c
> > +++ b/kernel/scs.c
> > @@ -5,50 +5,95 @@
> >   * Copyright (C) 2019 Google LLC
> >   */
> >
> > +#include <linux/cpuhotplug.h>
> >  #include <linux/kasan.h>
> >  #include <linux/mm.h>
> >  #include <linux/scs.h>
> > -#include <linux/slab.h>
> > +#include <linux/vmalloc.h>
> >  #include <linux/vmstat.h>
> >
> > -static struct kmem_cache *scs_cache;
> > -
> >  static void __scs_account(void *s, int account)
> >  {
> > -     struct page *scs_page = virt_to_page(s);
> > +     struct page *scs_page = vmalloc_to_page(s);
> >
> >       mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
> >                           account * (SCS_SIZE / SZ_1K));
> >  }
> >
> > +/* Matches NR_CACHED_STACKS for VMAP_STACK */
> > +#define NR_CACHED_SCS 2
> > +static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);
> > +
> >  static void *scs_alloc(int node)
> >  {
> > -     void *s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
> > +     int i;
> > +     void *s;
> > +
> > +     for (i = 0; i < NR_CACHED_SCS; i++) {
> > +             s = this_cpu_xchg(scs_cache[i], NULL);
> > +             if (s) {
> > +                     memset(s, 0, SCS_SIZE);
> > +                     goto out;
> > +             }
> > +     }
> > +
> > +     /*
> > +      * We allocate a full page for the shadow stack, which should be
> > +      * more than we need. Check the assumption nevertheless.
> > +      */
> > +     BUILD_BUG_ON(SCS_SIZE > PAGE_SIZE);
>
> With SCS_ORDER, you can drop this.
>
> > +
> > +     s = __vmalloc_node_range(PAGE_SIZE, SCS_SIZE,
> > +                              VMALLOC_START, VMALLOC_END,
> > +                              GFP_SCS, PAGE_KERNEL, 0,
> > +                              node, __builtin_return_address(0));
>
> Do we actually need vmalloc here? If we used alloc_pages() + vmap()

Does it matter that vmap() always uses NUMA_NO_NODE? We'll also lose
the ability to use vfree_atomic() in scs_release() unless we use
VM_MAP_PUT_PAGES and allocate the page array passed to vmap() with
kvmalloc(), which I think we need to do to avoid sleeping in
scs_free().

> instead, then we could avoid the expensive call to vmalloc_to_page()
> in __scs_account().

We still need vmalloc_to_page() in scs_release(). I suppose we could
alternatively follow the example in kernel/fork.c and cache the
vm_struct from find_vm_area() and use vm->pages[0] for the accounting.
Thoughts?
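
For the accounting itself (ignoring where we'd actually cache the
vm_struct), that would be roughly (untested):

	struct vm_struct *vm = find_vm_area(s);

	mod_node_page_state(page_pgdat(vm->pages[0]), NR_KERNEL_SCS_KB,
			    account * (SCS_SIZE / SZ_1K));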

>
> >
> >       if (!s)
> >               return NULL;
> >
> > +out:
> >       *__scs_magic(s) = SCS_END_MAGIC;
> >
> >       /*
> >        * Poison the allocation to catch unintentional accesses to
> >        * the shadow stack when KASAN is enabled.
> >        */
> > -     kasan_poison_object_data(scs_cache, s);
> > +     kasan_poison_vmalloc(s, SCS_SIZE);
> >       __scs_account(s, 1);
> >       return s;
> >  }
> >
> >  static void scs_free(void *s)
> >  {
> > +     int i;
> > +
> >       __scs_account(s, -1);
> > -     kasan_unpoison_object_data(scs_cache, s);
> > -     kmem_cache_free(scs_cache, s);
> > +     kasan_unpoison_vmalloc(s, SCS_SIZE);
>
> I don't see the point in unpoisoning here tbh; vfree_atomic() re-poisons
> almost immediately, so we should probably defer this to scs_alloc() and
> only when picking the stack out of the cache.

Sure, I'll change this in v2.

>
> > +
> > +     for (i = 0; i < NR_CACHED_SCS; i++)
>
> Can you add a comment about the re-entrancy here and why we're using
> this_cpu_cmpxchg() please?

I'll add a comment.

Sami


* Re: [PATCH 1/2] scs: switch to vmapped shadow stacks
  2020-11-20 17:00     ` Sami Tolvanen
@ 2020-11-23 11:08       ` Will Deacon
  0 siblings, 0 replies; 11+ messages in thread
From: Will Deacon @ 2020-11-23 11:08 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Catalin Marinas, Mark Rutland, James Morse, Ard Biesheuvel,
	Kees Cook, linux-arm-kernel, LKML

Hi Sami,

On Fri, Nov 20, 2020 at 09:00:17AM -0800, Sami Tolvanen wrote:
> On Thu, Nov 19, 2020 at 5:00 AM Will Deacon <will@kernel.org> wrote:
> > On Thu, Oct 22, 2020 at 01:23:54PM -0700, Sami Tolvanen wrote:
> > > The kernel currently uses kmem_cache to allocate shadow call stacks,
> > > which means an overflow may not be immediately detected and can
> > > potentially result in another task's shadow stack to be overwritten.
> > >
> > > This change switches SCS to use virtually mapped shadow stacks,
> > > which increases shadow stack size to a full page and provides more
> > > robust overflow detection similarly to VMAP_STACK.
> > >
> > > Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> > > ---
> > >  include/linux/scs.h |  7 +----
> > >  kernel/scs.c        | 63 ++++++++++++++++++++++++++++++++++++++-------
> > >  2 files changed, 55 insertions(+), 15 deletions(-)
> >
> > Cheers for posting this. I _much_ prefer handling the SCS this way, but I
> > have some comments on the implementation below.
> >
> > > diff --git a/include/linux/scs.h b/include/linux/scs.h
> > > index 6dec390cf154..86e3c4b7b714 100644
> > > --- a/include/linux/scs.h
> > > +++ b/include/linux/scs.h
> > > @@ -15,12 +15,7 @@
> > >
> > >  #ifdef CONFIG_SHADOW_CALL_STACK
> > >
> > > -/*
> > > - * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit
> > > - * architecture) provided ~40% safety margin on stack usage while keeping
> > > - * memory allocation overhead reasonable.
> > > - */
> > > -#define SCS_SIZE             SZ_1K
> > > +#define SCS_SIZE             PAGE_SIZE
> >
> > We could make this SCS_ORDER and then forget about alignment etc.
> 
> It's still convenient to have SCS_SIZE defined, I think. I can
> certainly define SCS_ORDER and use that to define SCS_SIZE, but do you
> think we'll need an order >0 here at some point in future?

I'm not daft enough to comment on SCS size again ;)
Let's define SCS_ORDER 0 and then SCS_SIZE in terms of that.
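
i.e. something like:

#define SCS_ORDER		0
#define SCS_SIZE		(PAGE_SIZE << SCS_ORDER)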

> 
> > >  #define GFP_SCS                      (GFP_KERNEL | __GFP_ZERO)
> > >
> > >  /* An illegal pointer value to mark the end of the shadow stack. */
> > > diff --git a/kernel/scs.c b/kernel/scs.c
> > > index 4ff4a7ba0094..2136edba548d 100644
> > > --- a/kernel/scs.c
> > > +++ b/kernel/scs.c
> > > @@ -5,50 +5,95 @@
> > >   * Copyright (C) 2019 Google LLC
> > >   */
> > >
> > > +#include <linux/cpuhotplug.h>
> > >  #include <linux/kasan.h>
> > >  #include <linux/mm.h>
> > >  #include <linux/scs.h>
> > > -#include <linux/slab.h>
> > > +#include <linux/vmalloc.h>
> > >  #include <linux/vmstat.h>
> > >
> > > -static struct kmem_cache *scs_cache;
> > > -
> > >  static void __scs_account(void *s, int account)
> > >  {
> > > -     struct page *scs_page = virt_to_page(s);
> > > +     struct page *scs_page = vmalloc_to_page(s);
> > >
> > >       mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
> > >                           account * (SCS_SIZE / SZ_1K));
> > >  }
> > >
> > > +/* Matches NR_CACHED_STACKS for VMAP_STACK */
> > > +#define NR_CACHED_SCS 2
> > > +static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);
> > > +
> > >  static void *scs_alloc(int node)
> > >  {
> > > -     void *s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
> > > +     int i;
> > > +     void *s;
> > > +
> > > +     for (i = 0; i < NR_CACHED_SCS; i++) {
> > > +             s = this_cpu_xchg(scs_cache[i], NULL);
> > > +             if (s) {
> > > +                     memset(s, 0, SCS_SIZE);
> > > +                     goto out;
> > > +             }
> > > +     }
> > > +
> > > +     /*
> > > +      * We allocate a full page for the shadow stack, which should be
> > > +      * more than we need. Check the assumption nevertheless.
> > > +      */
> > > +     BUILD_BUG_ON(SCS_SIZE > PAGE_SIZE);
> >
> > With SCS_ORDER, you can drop this.
> >
> > > +
> > > +     s = __vmalloc_node_range(PAGE_SIZE, SCS_SIZE,
> > > +                              VMALLOC_START, VMALLOC_END,
> > > +                              GFP_SCS, PAGE_KERNEL, 0,
> > > +                              node, __builtin_return_address(0));
> >
> > Do we actually need vmalloc here? If we used alloc_pages() + vmap()
> 
> Does it matter that vmap() always uses NUMA_NO_NODE? We'll also lose
> the ability to use vfree_atomic() in scs_release() unless we use
> VM_MAP_PUT_PAGES and allocate the page array passed to vmap() with
> kvmalloc(), which I think we need to do to avoid sleeping in
> scs_free().

Huh, I didn't realise we didn't have vunmap_atomic(). In which case, I take
that back -- let's stick with vmalloc() for now.

Cheers,

Will


