* x86/irq: Make run_on_irqstack_cond() typesafe
@ 2020-09-22  7:58 Thomas Gleixner
  2020-09-22 18:45 ` Sami Tolvanen
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Thomas Gleixner @ 2020-09-22  7:58 UTC (permalink / raw)
  To: LKML; +Cc: x86, Sami Tolvanen, Kees Cook, Jann Horn

Sami reported that run_on_irqstack_cond() requires the caller to cast
functions to mismatching types, which trips indirect call Control-Flow
Integrity (CFI) in Clang.

Instead of disabling CFI on that function, provide proper helpers for
the three call variants. The actual ASM code stays the same as that is
out of reach.

Reported-by: Sami Tolvanen <samitolvanen@google.com>
Fixes: 931b94145981 ("x86/entry: Provide helpers for executing on the irqstack")
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
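[ Editor's note: a minimal user-space sketch, not part of the patch, of
  the casting pattern the old helper forced on callers and why Clang's
  indirect-call CFI rejects it. The names, the struct, and the compiler
  flags below are illustrative; the inner assignment mirrors the line
  removed from irq_stack.h. ]

/* Sketch (not kernel code): the shape of the old run_on_irqstack_cond().
 * The callee's real type is void (*)(struct foo *), but the indirect
 * call happens through void (*)(void *). Built with
 *   clang -flto -fvisibility=hidden -fsanitize=cfi-icall
 * the call in run() traps at runtime because the types mismatch.
 */
#include <stdio.h>

struct foo { int x; };

static void handler(struct foo *f)
{
	printf("%d\n", f->x);
}

static void run(void *func, void *arg)
{
	void (*__func)(void *arg) = func;	/* mismatching type */

	__func(arg);				/* CFI trap fires here */
}

int main(void)
{
	struct foo f = { .x = 42 };

	run((void *)handler, &f);		/* caller must cast */
	return 0;
}
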
 arch/x86/entry/common.c          |    2 -
 arch/x86/entry/entry_64.S        |    2 +
 arch/x86/include/asm/idtentry.h  |    2 -
 arch/x86/include/asm/irq_stack.h |   69 ++++++++++++++++++++++++++++++++++-----
 arch/x86/kernel/irq.c            |    2 -
 arch/x86/kernel/irq_64.c         |    2 -
 6 files changed, 67 insertions(+), 12 deletions(-)

--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -299,7 +299,7 @@ static void __xen_pv_evtchn_do_upcall(vo
 	old_regs = set_irq_regs(regs);
 
 	instrumentation_begin();
-	run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
+	run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
 	instrumentation_end();
 
 	set_irq_regs(old_regs);
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -762,6 +762,8 @@ SYM_CODE_END(.Lbad_gs)
  * rdx: Function argument (can be NULL if none)
  */
 SYM_FUNC_START(asm_call_on_stack)
+SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
+SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
 	/*
 	 * Save the frame pointer unconditionally. This allows the ORC
 	 * unwinder to handle the stack switch.
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -242,7 +242,7 @@ static void __##func(struct pt_regs *reg
 	instrumentation_begin();					\
 	irq_enter_rcu();						\
 	kvm_set_cpu_l1tf_flush_l1d();					\
-	run_on_irqstack_cond(__##func, regs, regs);			\
+	run_sysvec_on_irqstack_cond(__##func, regs);			\
 	irq_exit_rcu();							\
 	instrumentation_end();						\
 	irqentry_exit(regs, state);					\
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -12,20 +12,50 @@ static __always_inline bool irqstack_act
 	return __this_cpu_read(irq_count) != -1;
 }
 
-void asm_call_on_stack(void *sp, void *func, void *arg);
+void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
+void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
+			      struct pt_regs *regs);
+void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
+			   struct irq_desc *desc);
 
-static __always_inline void __run_on_irqstack(void *func, void *arg)
+static __always_inline void __run_on_irqstack(void (*func)(void))
 {
 	void *tos = __this_cpu_read(hardirq_stack_ptr);
 
 	__this_cpu_add(irq_count, 1);
-	asm_call_on_stack(tos - 8, func, arg);
+	asm_call_on_stack(tos - 8, func, NULL);
+	__this_cpu_sub(irq_count, 1);
+}
+
+static __always_inline void
+__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+			 struct pt_regs *regs)
+{
+	void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+	__this_cpu_add(irq_count, 1);
+	asm_call_sysvec_on_stack(tos - 8, func, regs);
+	__this_cpu_sub(irq_count, 1);
+}
+
+static __always_inline void
+__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+		      struct irq_desc *desc)
+{
+	void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+	__this_cpu_add(irq_count, 1);
+	asm_call_irq_on_stack(tos - 8, func, desc);
 	__this_cpu_sub(irq_count, 1);
 }
 
 #else /* CONFIG_X86_64 */
 static inline bool irqstack_active(void) { return false; }
-static inline void __run_on_irqstack(void *func, void *arg) { }
+static inline void __run_on_irqstack(void (*func)(void), void *arg) { }
+static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+					    struct pt_regs *regs) { }
+static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+					 struct irq_desc *desc) { }
 #endif /* !CONFIG_X86_64 */
 
 static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
@@ -37,17 +67,40 @@ static __always_inline bool irq_needs_ir
 	return !user_mode(regs) && !irqstack_active();
 }
 
-static __always_inline void run_on_irqstack_cond(void *func, void *arg,
+
+static __always_inline void run_on_irqstack_cond(void (*func)(void),
 						 struct pt_regs *regs)
 {
-	void (*__func)(void *arg) = func;
+	lockdep_assert_irqs_disabled();
+
+	if (irq_needs_irq_stack(regs))
+		__run_on_irqstack(func);
+	else
+		func();
+}
+
+static __always_inline void
+run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
+			    struct pt_regs *regs)
+{
+	lockdep_assert_irqs_disabled();
 
+	if (irq_needs_irq_stack(regs))
+		__run_sysvec_on_irqstack(func, regs);
+	else
+		func(regs);
+}
+
+static __always_inline void
+run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
+			 struct pt_regs *regs)
+{
 	lockdep_assert_irqs_disabled();
 
 	if (irq_needs_irq_stack(regs))
-		__run_on_irqstack(__func, arg);
+		__run_irq_on_irqstack(func, desc);
 	else
-		__func(arg);
+		func(desc);
 }
 
 #endif
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -227,7 +227,7 @@ static __always_inline void handle_irq(s
 				       struct pt_regs *regs)
 {
 	if (IS_ENABLED(CONFIG_X86_64))
-		run_on_irqstack_cond(desc->handle_irq, desc, regs);
+		run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
 	else
 		__handle_irq(desc, regs);
 }
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -74,5 +74,5 @@ int irq_init_percpu_irqstack(unsigned in
 
 void do_softirq_own_stack(void)
 {
-	run_on_irqstack_cond(__do_softirq, NULL, NULL);
+	run_on_irqstack_cond(__do_softirq, NULL);
 }


* Re: x86/irq: Make run_on_irqstack_cond() typesafe
  2020-09-22  7:58 x86/irq: Make run_on_irqstack_cond() typesafe Thomas Gleixner
@ 2020-09-22 18:45 ` Sami Tolvanen
  2020-09-22 20:38 ` [tip: x86/urgent] " tip-bot2 for Thomas Gleixner
  2020-09-23 19:19 ` Kees Cook
  2 siblings, 0 replies; 5+ messages in thread
From: Sami Tolvanen @ 2020-09-22 18:45 UTC (permalink / raw)
  To: Thomas Gleixner; +Cc: LKML, X86 ML, Kees Cook, Jann Horn, Nathan Chancellor

Hi Thomas,

On Tue, Sep 22, 2020 at 12:58 AM Thomas Gleixner <tglx@linutronix.de> wrote:
>
> Sami reported that run_on_irqstack_cond() requires the caller to cast
> functions to mismatching types, which trips indirect call Control-Flow
> Integrity (CFI) in Clang.
>
> Instead of disabling CFI on that function, provide proper helpers for
> the three call variants. The actual ASM code stays the same as that is
> out of reach.
>
> Reported-by: Sami Tolvanen <samitolvanen@google.com>
> Fixes: 931b94145981 ("x86/entry: Provide helpers for executing on the irqstack")
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Thank you for the patch! I confirmed that this fixes the type mismatch
issue with CFI for me:

Tested-by: Sami Tolvanen <samitolvanen@google.com>

Also, please note that this was first reported by Nathan in this GitHub issue:
https://github.com/ClangBuiltLinux/linux/issues/1052

Sami


* [tip: x86/urgent] x86/irq: Make run_on_irqstack_cond() typesafe
  2020-09-22  7:58 x86/irq: Make run_on_irqstack_cond() typesafe Thomas Gleixner
  2020-09-22 18:45 ` Sami Tolvanen
@ 2020-09-22 20:38 ` tip-bot2 for Thomas Gleixner
  2020-09-23 19:19 ` Kees Cook
  2 siblings, 0 replies; 5+ messages in thread
From: tip-bot2 for Thomas Gleixner @ 2020-09-22 20:38 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Nathan Chancellor, Sami Tolvanen, Thomas Gleixner,
	Borislav Petkov, stable, x86, LKML

The following commit has been merged into the x86/urgent branch of tip:

Commit-ID:     a7b3474cbb2864d5500d5e4f48dd57c903975cab
Gitweb:        https://git.kernel.org/tip/a7b3474cbb2864d5500d5e4f48dd57c903975cab
Author:        Thomas Gleixner <tglx@linutronix.de>
AuthorDate:    Tue, 22 Sep 2020 09:58:52 +02:00
Committer:     Borislav Petkov <bp@suse.de>
CommitterDate: Tue, 22 Sep 2020 22:13:34 +02:00

x86/irq: Make run_on_irqstack_cond() typesafe

Sami reported that run_on_irqstack_cond() requires the caller to cast
functions to mismatching types, which trips indirect call Control-Flow
Integrity (CFI) in Clang.

Instead of disabling CFI on that function, provide proper helpers for
the three call variants. The actual ASM code stays the same as that is
out of reach.

 [ bp: Fix __run_on_irqstack() prototype to match. ]

Fixes: 931b94145981 ("x86/entry: Provide helpers for executing on the irqstack")
Reported-by: Nathan Chancellor <natechancellor@gmail.com>
Reported-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Tested-by: Sami Tolvanen <samitolvanen@google.com>
Cc: <stable@vger.kernel.org>
Link: https://github.com/ClangBuiltLinux/linux/issues/1052
Link: https://lkml.kernel.org/r/87pn6eb5tv.fsf@nanos.tec.linutronix.de
---
 arch/x86/entry/common.c          |  2 +-
 arch/x86/entry/entry_64.S        |  2 +-
 arch/x86/include/asm/idtentry.h  |  2 +-
 arch/x86/include/asm/irq_stack.h | 69 +++++++++++++++++++++++++++----
 arch/x86/kernel/irq.c            |  2 +-
 arch/x86/kernel/irq_64.c         |  2 +-
 6 files changed, 67 insertions(+), 12 deletions(-)

diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 2f84c7c..870efee 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -299,7 +299,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 	old_regs = set_irq_regs(regs);
 
 	instrumentation_begin();
-	run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
+	run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
 	instrumentation_end();
 
 	set_irq_regs(old_regs);
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 70dea93..d977079 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -682,6 +682,8 @@ SYM_CODE_END(.Lbad_gs)
  * rdx: Function argument (can be NULL if none)
  */
 SYM_FUNC_START(asm_call_on_stack)
+SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
+SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
 	/*
 	 * Save the frame pointer unconditionally. This allows the ORC
 	 * unwinder to handle the stack switch.
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index a433661..a063864 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -242,7 +242,7 @@ __visible noinstr void func(struct pt_regs *regs)			\
 	instrumentation_begin();					\
 	irq_enter_rcu();						\
 	kvm_set_cpu_l1tf_flush_l1d();					\
-	run_on_irqstack_cond(__##func, regs, regs);			\
+	run_sysvec_on_irqstack_cond(__##func, regs);			\
 	irq_exit_rcu();							\
 	instrumentation_end();						\
 	irqentry_exit(regs, state);					\
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index 4ae66f0..7758169 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -12,20 +12,50 @@ static __always_inline bool irqstack_active(void)
 	return __this_cpu_read(irq_count) != -1;
 }
 
-void asm_call_on_stack(void *sp, void *func, void *arg);
+void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
+void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
+			      struct pt_regs *regs);
+void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
+			   struct irq_desc *desc);
 
-static __always_inline void __run_on_irqstack(void *func, void *arg)
+static __always_inline void __run_on_irqstack(void (*func)(void))
 {
 	void *tos = __this_cpu_read(hardirq_stack_ptr);
 
 	__this_cpu_add(irq_count, 1);
-	asm_call_on_stack(tos - 8, func, arg);
+	asm_call_on_stack(tos - 8, func, NULL);
+	__this_cpu_sub(irq_count, 1);
+}
+
+static __always_inline void
+__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+			 struct pt_regs *regs)
+{
+	void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+	__this_cpu_add(irq_count, 1);
+	asm_call_sysvec_on_stack(tos - 8, func, regs);
+	__this_cpu_sub(irq_count, 1);
+}
+
+static __always_inline void
+__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+		      struct irq_desc *desc)
+{
+	void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+	__this_cpu_add(irq_count, 1);
+	asm_call_irq_on_stack(tos - 8, func, desc);
 	__this_cpu_sub(irq_count, 1);
 }
 
 #else /* CONFIG_X86_64 */
 static inline bool irqstack_active(void) { return false; }
-static inline void __run_on_irqstack(void *func, void *arg) { }
+static inline void __run_on_irqstack(void (*func)(void)) { }
+static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+					    struct pt_regs *regs) { }
+static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+					 struct irq_desc *desc) { }
 #endif /* !CONFIG_X86_64 */
 
 static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
@@ -37,17 +67,40 @@ static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
 	return !user_mode(regs) && !irqstack_active();
 }
 
-static __always_inline void run_on_irqstack_cond(void *func, void *arg,
+
+static __always_inline void run_on_irqstack_cond(void (*func)(void),
 						 struct pt_regs *regs)
 {
-	void (*__func)(void *arg) = func;
+	lockdep_assert_irqs_disabled();
+
+	if (irq_needs_irq_stack(regs))
+		__run_on_irqstack(func);
+	else
+		func();
+}
+
+static __always_inline void
+run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
+			    struct pt_regs *regs)
+{
+	lockdep_assert_irqs_disabled();
 
+	if (irq_needs_irq_stack(regs))
+		__run_sysvec_on_irqstack(func, regs);
+	else
+		func(regs);
+}
+
+static __always_inline void
+run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
+			 struct pt_regs *regs)
+{
 	lockdep_assert_irqs_disabled();
 
 	if (irq_needs_irq_stack(regs))
-		__run_on_irqstack(__func, arg);
+		__run_irq_on_irqstack(func, desc);
 	else
-		__func(arg);
+		func(desc);
 }
 
 #endif
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 1810602..c5dd503 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -227,7 +227,7 @@ static __always_inline void handle_irq(struct irq_desc *desc,
 				       struct pt_regs *regs)
 {
 	if (IS_ENABLED(CONFIG_X86_64))
-		run_on_irqstack_cond(desc->handle_irq, desc, regs);
+		run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
 	else
 		__handle_irq(desc, regs);
 }
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 1b4fe93..440eed5 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -74,5 +74,5 @@ int irq_init_percpu_irqstack(unsigned int cpu)
 
 void do_softirq_own_stack(void)
 {
-	run_on_irqstack_cond(__do_softirq, NULL, NULL);
+	run_on_irqstack_cond(__do_softirq, NULL);
 }


* Re: x86/irq: Make run_on_irqstack_cond() typesafe
  2020-09-22  7:58 x86/irq: Make run_on_irqstack_cond() typesafe Thomas Gleixner
  2020-09-22 18:45 ` Sami Tolvanen
  2020-09-22 20:38 ` [tip: x86/urgent] " tip-bot2 for Thomas Gleixner
@ 2020-09-23 19:19 ` Kees Cook
  2020-09-23 19:36   ` Jann Horn
  2 siblings, 1 reply; 5+ messages in thread
From: Kees Cook @ 2020-09-23 19:19 UTC (permalink / raw)
  To: Thomas Gleixner; +Cc: LKML, x86, Sami Tolvanen, Jann Horn

On Tue, Sep 22, 2020 at 09:58:52AM +0200, Thomas Gleixner wrote:
> -void asm_call_on_stack(void *sp, void *func, void *arg);
> +void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
> +void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
> +			      struct pt_regs *regs);
> +void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
> +			   struct irq_desc *desc);

Eeeh, err. So, this is nice for the CFI case, but can we just
inline asm_call_on_stack() instead? Having any of these as distinct
functions in the kernel is really not safe: it provides a trivial
global stack-pivot[1] function for use in ROP attacks, which is one
of the central requirements for mounting such attacks. This allows a
completely arbitrary sp argument, function, and first argument. :(

Much better would be to keep asm_call_on_stack() as an inline so the
stack pointer is always coming from percpu variables, and to have the
irq_count actually checked (i.e. freak out if it falls below zero to
catch jumps into the middle of a function when an attempt to bypass the
load from the percpu area happens). I would expect this form to be much
more robust:

inc
load sp from per-cpu
pivot sp
make call
restore sp
WARN(dec_and_test)
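
[ Editor's illustration: a hedged C sketch of just the accounting check
  from this sequence, reusing the patch's percpu variables. It is
  hypothetical, not what was merged, and it still calls the global
  helper; the full proposal would inline the pivot itself so that no
  free-standing gadget remains. ]

static __always_inline void run_on_irqstack_checked(void (*func)(void))
{
	/* load sp from per-cpu */
	void *tos = __this_cpu_read(hardirq_stack_ptr);

	__this_cpu_add(irq_count, 1);		/* inc */
	/* pivot sp, make call, restore sp */
	asm_call_on_stack(tos - 8, func, NULL);
	/*
	 * WARN(dec_and_test): irq_count rests at -1, so anything below
	 * that after the balanced decrement means the increment was
	 * bypassed, e.g. by a jump into the middle of the function.
	 */
	WARN_ON_ONCE(__this_cpu_sub_return(irq_count, 1) < -1);
}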


-Kees

[1] https://security.stackexchange.com/questions/44418/return-oriented-programming-how-to-find-a-stack-pivot

-- 
Kees Cook


* Re: x86/irq: Make run_on_irqstack_cond() typesafe
  2020-09-23 19:19 ` Kees Cook
@ 2020-09-23 19:36   ` Jann Horn
  0 siblings, 0 replies; 5+ messages in thread
From: Jann Horn @ 2020-09-23 19:36 UTC (permalink / raw)
  To: Kees Cook; +Cc: Thomas Gleixner, LKML, the arch/x86 maintainers, Sami Tolvanen

On Wed, Sep 23, 2020 at 9:20 PM Kees Cook <keescook@chromium.org> wrote:
> On Tue, Sep 22, 2020 at 09:58:52AM +0200, Thomas Gleixner wrote:
> > -void asm_call_on_stack(void *sp, void *func, void *arg);
> > +void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
> > +void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
> > +                           struct pt_regs *regs);
> > +void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
> > +                        struct irq_desc *desc);
>
> Eeeh, err. So, this is nice for the CFI case, but can we just
> inline asm_call_on_stack() instead? Having any of these as distinct
> functions in the kernel is really not safe: it provides a trivial
> global stack-pivot[1] function for use in ROP attacks, which is one
> of the central requirements for mounting such attacks. This allows a
> completely arbitrary sp argument, function, and first argument. :(
>
> Much better would be to keep asm_call_on_stack() as an inline so the
> stack pointer is always coming from percpu variables, and to have the
> irq_count actually checked (i.e. freak out if it falls below zero to
> catch jumps into the middle of a function when an attempt to bypass the
> load from the percpu area happens). I would expect this form to be much
> more robust:
>
> inc
> load sp from per-cpu
> pivot sp
> make call
> restore sp
> WARN(dec_and_test)

I don't see the point. If you can already jump to arbitrary kernel
instructions, I would be extremely surprised if you could't find some
other way to get full kernel read/write. Even just jumping to the
epilogue of some function that increments the stack pointer and then
tries to return (maybe even after loading RBP from that spot on the
stack) will probably get you quite far.

