* [PATCH] x86/mm/tlb: Remove flush_tlb_info from the stack
From: Nadav Amit @ 2019-04-23  6:57 UTC
  To: Peter Zijlstra, Borislav Petkov
  Cc: Andy Lutomirski, Ingo Molnar, Thomas Gleixner, x86, linux-kernel,
	Nadav Amit, Dave Hansen

Remove flush_tlb_info variables from the stack. This allows flush_tlb_info
to be cache-line aligned, avoiding potentially unnecessary cache-line
movements. It also gives the variables a fixed virtual-to-physical
translation, which reduces TLB misses.

Use a per-CPU struct for flush_tlb_mm_range() and
flush_tlb_kernel_range(). Add debug assertions to ensure there are
no nested TLB flushes that might overwrite the per-CPU data. For
arch_tlbbatch_flush(), use a const struct.

Results when running a microbenchmark that performs 10^6 MADV_DONTNEED
operations, each followed by touching a page, while 3 additional
threads run a busy-wait loop (5 runs):

			base		off-stack
			----		---------
avg (per operation)	1.629		1.580	(-3%)
stddev			0.007		0.012
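
(For context, a sketch of the kind of microbenchmark described above --
the actual benchmark was not posted, so names and structure here are
illustrative. The busy-wait threads keep the mm live on other CPUs, so
each MADV_DONTNEED takes the remote-flush path:)

#include <pthread.h>
#include <sys/mman.h>

#define NOPS	1000000

static volatile int stop;

static void *busy_wait(void *arg)
{
	while (!stop)		/* keep this CPU in the mm's cpumask */
		;
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	int i;
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	for (i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, busy_wait, NULL);

	/* time this loop externally; divide by NOPS for the per-op cost */
	for (i = 0; i < NOPS; i++) {
		madvise(p, 4096, MADV_DONTNEED);	/* zap the page */
		*p = 1;					/* fault it back in */
	}

	stop = 1;
	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}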

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Nadav Amit <namit@vmware.com>
---
 arch/x86/mm/tlb.c | 75 ++++++++++++++++++++++++++++++++++-------------
 1 file changed, 54 insertions(+), 21 deletions(-)

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 487b8474c01c..c4ac66dfb34e 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -634,7 +634,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
 }
 
-static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
+static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
 {
 	const struct flush_tlb_info *f = info;
 
@@ -722,43 +722,62 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
  */
 unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);
+
+#ifdef CONFIG_DEBUG_VM
+static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
+#endif
+
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned int stride_shift,
 				bool freed_tables)
 {
+	struct flush_tlb_info *info;
 	int cpu;
 
-	struct flush_tlb_info info = {
-		.mm = mm,
-		.stride_shift = stride_shift,
-		.freed_tables = freed_tables,
-	};
-
 	cpu = get_cpu();
+	info = this_cpu_ptr(&flush_tlb_info);
+
+	/*
+	 * Ensure that the following code is non-reentrant and flush_tlb_info
+	 * is not overwritten. This means no TLB flushing is initiated by
+	 * interrupt handlers and machine-check exception handlers.
+	 */
+#ifdef CONFIG_DEBUG_VM
+	BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
+#endif
 
 	/* This is also a barrier that synchronizes with switch_mm(). */
-	info.new_tlb_gen = inc_mm_tlb_gen(mm);
+	info->new_tlb_gen = inc_mm_tlb_gen(mm);
+	info->mm = mm;
+	info->stride_shift = stride_shift;
+	info->freed_tables = freed_tables;
 
 	/* Should we flush just the requested range? */
 	if ((end != TLB_FLUSH_ALL) &&
 	    ((end - start) >> stride_shift) <= tlb_single_page_flush_ceiling) {
-		info.start = start;
-		info.end = end;
+		info->start = start;
+		info->end = end;
 	} else {
-		info.start = 0UL;
-		info.end = TLB_FLUSH_ALL;
+		info->start = 0UL;
+		info->end = TLB_FLUSH_ALL;
 	}
 
 	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
-		VM_WARN_ON(irqs_disabled());
+		lockdep_assert_irqs_enabled();
 		local_irq_disable();
-		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
+		flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
 		local_irq_enable();
 	}
 
 	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), &info);
+		flush_tlb_others(mm_cpumask(mm), info);
 
+#ifdef CONFIG_DEBUG_VM
+	barrier();
+	this_cpu_dec(flush_tlb_info_idx);
+#endif
 	put_cpu();
 }
 
@@ -787,22 +806,36 @@ static void do_kernel_range_flush(void *info)
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-
 	/* Balance as user space task's flush, a bit conservative */
 	if (end == TLB_FLUSH_ALL ||
 	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
 		on_each_cpu(do_flush_tlb_all, NULL, 1);
 	} else {
-		struct flush_tlb_info info;
-		info.start = start;
-		info.end = end;
-		on_each_cpu(do_kernel_range_flush, &info, 1);
+		struct flush_tlb_info *info;
+
+		preempt_disable();
+
+#ifdef CONFIG_DEBUG_VM
+		BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
+#endif
+
+		info = this_cpu_ptr(&flush_tlb_info);
+		info->start = start;
+		info->end = end;
+
+		on_each_cpu(do_kernel_range_flush, info, 1);
+
+#ifdef CONFIG_DEBUG_VM
+		barrier();
+		this_cpu_dec(flush_tlb_info_idx);
+#endif
+		preempt_enable();
 	}
 }
 
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
-	struct flush_tlb_info info = {
+	static const struct flush_tlb_info info = {
 		.mm = NULL,
 		.start = 0UL,
 		.end = TLB_FLUSH_ALL,
-- 
2.19.1



* Re: [PATCH] x86/mm/tlb: Remove flush_tlb_info from the stack
From: Andy Lutomirski @ 2019-04-23 16:50 UTC
  To: Nadav Amit
  Cc: Peter Zijlstra, Borislav Petkov, Andy Lutomirski, Ingo Molnar,
	Thomas Gleixner, X86 ML, LKML, Dave Hansen

On Tue, Apr 23, 2019 at 12:12 AM Nadav Amit <namit@vmware.com> wrote:
>
> Remove flush_tlb_info variables from the stack. This allows flush_tlb_info
> to be cache-line aligned, avoiding potentially unnecessary cache-line
> movements. It also gives the variables a fixed virtual-to-physical
> translation, which reduces TLB misses.
>
> Use a per-CPU struct for flush_tlb_mm_range() and
> flush_tlb_kernel_range(). Add debug assertions to ensure there are
> no nested TLB flushes that might overwrite the per-CPU data. For
> arch_tlbbatch_flush(), use a const struct.
>
> Results when running a microbenchmark that performs 10^6 MADV_DONTNEED
> operations, each followed by touching a page, while 3 additional
> threads run a busy-wait loop (5 runs):

Can you add a memset(..., 0, sizeof(struct flush_tlb_info)) everywhere
you grab it?  Or, even better, perhaps do something like:

static inline struct flush_tlb_info *get_flush_tlb_info(void)
{
  /* check reentrancy, make sure that we use smp_processor_id() or
otherwise assert that we're bound to a single CPU. */
  struct flush_tlb_info *ptr = this_cpu_ptr(...);
  memset(ptr, 0, sizeof(*ptr));
  return ptr;
}

static inline void put_flush_tlb_info(void)
{
 /* finish checking reentrancy. */
}
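
(A possible fleshing-out of these helpers on top of the patch's per-CPU
variables -- illustrative only, not the code that was applied; it reuses
the CONFIG_DEBUG_VM counter from the patch and assumes the caller has
preemption disabled:)

static inline struct flush_tlb_info *get_flush_tlb_info(void)
{
	/* Caller must have preemption disabled, e.g. via get_cpu(). */
	struct flush_tlb_info *ptr = this_cpu_ptr(&flush_tlb_info);

#ifdef CONFIG_DEBUG_VM
	/*
	 * A nested flush from an interrupt or machine-check handler
	 * would silently overwrite the per-CPU data; catch it here.
	 */
	BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
#endif
	memset(ptr, 0, sizeof(*ptr));
	return ptr;
}

static inline void put_flush_tlb_info(void)
{
#ifdef CONFIG_DEBUG_VM
	/* Pairs with get_flush_tlb_info(). */
	barrier();
	this_cpu_dec(flush_tlb_info_idx);
#endif
}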

>
>                         base            off-stack
>                         ----            ---------
> avg (per operation)     1.629           1.580   (-3%)
> stddev                  0.007           0.012
>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Cc: Andy Lutomirski <luto@kernel.org>
> Cc: Dave Hansen <dave.hansen@intel.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Signed-off-by: Nadav Amit <namit@vmware.com>
> ---
>  arch/x86/mm/tlb.c | 75 ++++++++++++++++++++++++++++++++++-------------
>  1 file changed, 54 insertions(+), 21 deletions(-)
>
> diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
> index 487b8474c01c..c4ac66dfb34e 100644
> --- a/arch/x86/mm/tlb.c
> +++ b/arch/x86/mm/tlb.c
> @@ -634,7 +634,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
>         this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
>  }
>
> -static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
> +static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
>  {
>         const struct flush_tlb_info *f = info;
>
> @@ -722,43 +722,62 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
>   */
>  unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
>
> +
> +static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);
> +
> +#ifdef CONFIG_DEBUG_VM
> +static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
> +#endif
> +
>  void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
>                                 unsigned long end, unsigned int stride_shift,
>                                 bool freed_tables)
>  {
> +       struct flush_tlb_info *info;
>         int cpu;
>
> -       struct flush_tlb_info info = {
> -               .mm = mm,
> -               .stride_shift = stride_shift,
> -               .freed_tables = freed_tables,
> -       };
> -
>         cpu = get_cpu();
> +       info = this_cpu_ptr(&flush_tlb_info);
> +
> +       /*
> +        * Ensure that the following code is non-reentrant and flush_tlb_info
> +        * is not overwritten. This means no TLB flushing is initiated by
> +        * interrupt handlers and machine-check exception handlers.
> +        */
> +#ifdef CONFIG_DEBUG_VM
> +       BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
> +#endif
>
>         /* This is also a barrier that synchronizes with switch_mm(). */
> -       info.new_tlb_gen = inc_mm_tlb_gen(mm);
> +       info->new_tlb_gen = inc_mm_tlb_gen(mm);
> +       info->mm = mm;
> +       info->stride_shift = stride_shift;
> +       info->freed_tables = freed_tables;
>
>         /* Should we flush just the requested range? */
>         if ((end != TLB_FLUSH_ALL) &&
>             ((end - start) >> stride_shift) <= tlb_single_page_flush_ceiling) {
> -               info.start = start;
> -               info.end = end;
> +               info->start = start;
> +               info->end = end;
>         } else {
> -               info.start = 0UL;
> -               info.end = TLB_FLUSH_ALL;
> +               info->start = 0UL;
> +               info->end = TLB_FLUSH_ALL;
>         }
>
>         if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
> -               VM_WARN_ON(irqs_disabled());
> +               lockdep_assert_irqs_enabled();
>                 local_irq_disable();
> -               flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
> +               flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
>                 local_irq_enable();
>         }
>
>         if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
> -               flush_tlb_others(mm_cpumask(mm), &info);
> +               flush_tlb_others(mm_cpumask(mm), info);
>
> +#ifdef CONFIG_DEBUG_VM
> +       barrier();
> +       this_cpu_dec(flush_tlb_info_idx);
> +#endif
>         put_cpu();
>  }
>
> @@ -787,22 +806,36 @@ static void do_kernel_range_flush(void *info)
>
>  void flush_tlb_kernel_range(unsigned long start, unsigned long end)
>  {
> -
>         /* Balance as user space task's flush, a bit conservative */
>         if (end == TLB_FLUSH_ALL ||
>             (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
>                 on_each_cpu(do_flush_tlb_all, NULL, 1);
>         } else {
> -               struct flush_tlb_info info;
> -               info.start = start;
> -               info.end = end;
> -               on_each_cpu(do_kernel_range_flush, &info, 1);
> +               struct flush_tlb_info *info;
> +
> +               preempt_disable();
> +
> +#ifdef CONFIG_DEBUG_VM
> +               BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
> +#endif
> +
> +               info = this_cpu_ptr(&flush_tlb_info);
> +               info->start = start;
> +               info->end = end;
> +
> +               on_each_cpu(do_kernel_range_flush, info, 1);
> +
> +#ifdef CONFIG_DEBUG_VM
> +               barrier();
> +               this_cpu_dec(flush_tlb_info_idx);
> +#endif
> +               preempt_enable();
>         }
>  }
>
>  void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
>  {
> -       struct flush_tlb_info info = {
> +       static const struct flush_tlb_info info = {
>                 .mm = NULL,
>                 .start = 0UL,
>                 .end = TLB_FLUSH_ALL,
> --
> 2.19.1
>


* Re: [PATCH] x86/mm/tlb: Remove flush_tlb_info from the stack
From: Nadav Amit @ 2019-04-23 16:56 UTC
  To: Andy Lutomirski
  Cc: Peter Zijlstra, Borislav Petkov, Ingo Molnar, Thomas Gleixner,
	X86 ML, LKML, Dave Hansen

> On Apr 23, 2019, at 9:50 AM, Andy Lutomirski <luto@kernel.org> wrote:
> 
> On Tue, Apr 23, 2019 at 12:12 AM Nadav Amit <namit@vmware.com> wrote:
>> Remove flush_tlb_info variables from the stack. This allows flush_tlb_info
>> to be cache-line aligned, avoiding potentially unnecessary cache-line
>> movements. It also gives the variables a fixed virtual-to-physical
>> translation, which reduces TLB misses.
>> 
>> Use a per-CPU struct for flush_tlb_mm_range() and
>> flush_tlb_kernel_range(). Add debug assertions to ensure there are
>> no nested TLB flushes that might overwrite the per-CPU data. For
>> arch_tlbbatch_flush(), use a const struct.
>> 
>> Results when running a microbenchmark that performs 10^6 MADV_DONTNEED
>> operations, each followed by touching a page, while 3 additional
>> threads run a busy-wait loop (5 runs):
> 
> Can you add a memset(..., 0, sizeof(struct flush_tlb_info)) everywhere
> you grab it?  Or, even better, perhaps do something like:
> 
> static inline struct flush_tlb_info *get_flush_tlb_info(void)
> {
>  /* check reentrancy, make sure that we use smp_processor_id() or
> otherwise assert that we're bound to a single CPU. */
>  struct flush_tlb_info *ptr = this_cpu_ptr(...);
>  memset(ptr, 0, sizeof(*ptr));
>  return ptr;
> }
> 
> static inline void put_flush_tlb_info(void)
> {
> /* finish checking reentrancy. */
> }

I’ll check if the compiler is smart enough to avoid redundant assignments,
and if it is not, I’ll just give all the struct arguments to
get_flush_tlb_info() instead of memset() if you don’t mind.
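
(If get_flush_tlb_info() took the fields as arguments, the
flush_tlb_mm_range() call site could look roughly like this -- a
hypothetical sketch, with the parameter order invented and the
tlb_single_page_flush_ceiling adjustment of start/end omitted:)

	int cpu = get_cpu();

	info = get_flush_tlb_info(mm, start, end, stride_shift,
				  freed_tables, inc_mm_tlb_gen(mm));

	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
		local_irq_disable();
		flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), info);

	put_flush_tlb_info();
	put_cpu();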

I also want to try parallelizing the remote and local invocations, which
really annoys me every time I look at the code. Please let me know if
there is any “big reason” that I am missing for why it was not done
before. I just hope all the paravirt stuff will not make it too ugly.
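
(One hypothetical shape for that parallelization, assuming the local and
remote flush functions were unified under the smp_call_func_t signature
as a single flush_tlb_func() -- an invented name, not a function in the
posted patch. on_each_cpu_mask() sends the IPIs to the other CPUs in the
mask first and then runs the function locally, so the local and remote
flushes overlap instead of running back to back:)

	/*
	 * Send IPIs to the remote CPUs, flush locally while they work,
	 * then wait for everyone to finish.
	 */
	on_each_cpu_mask(mm_cpumask(mm), flush_tlb_func, info, true);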



* Re: [PATCH] x86/mm/tlb: Remove flush_tlb_info from the stack
From: Andy Lutomirski @ 2019-04-23 17:23 UTC
  To: Nadav Amit
  Cc: Andy Lutomirski, Peter Zijlstra, Borislav Petkov, Ingo Molnar,
	Thomas Gleixner, X86 ML, LKML, Dave Hansen

On Tue, Apr 23, 2019 at 9:56 AM Nadav Amit <namit@vmware.com> wrote:
>
> > On Apr 23, 2019, at 9:50 AM, Andy Lutomirski <luto@kernel.org> wrote:
> >
> > On Tue, Apr 23, 2019 at 12:12 AM Nadav Amit <namit@vmware.com> wrote:
> https://git.kernel.org/pub/scm/linux/kernel/git/luto/linux.git/log/?h=x86/fixes
> >> Remove flush_tlb_info variables from the stack. This allows flush_tlb_info
> >> to be cache-line aligned, avoiding potentially unnecessary cache-line
> >> movements. It also gives the variables a fixed virtual-to-physical
> >> translation, which reduces TLB misses.
> >>
> >> Use a per-CPU struct for flush_tlb_mm_range() and
> >> flush_tlb_kernel_range(). Add debug assertions to ensure there are
> >> no nested TLB flushes that might overwrite the per-CPU data. For
> >> arch_tlbbatch_flush(), use a const struct.
> >>
> >> Results when running a microbenchmark that performs 10^6 MADV_DONTNEED
> >> operations, each followed by touching a page, while 3 additional
> >> threads run a busy-wait loop (5 runs):
> >
> > Can you add a memset(..., 0, sizeof(struct flush_tlb_info)) everywhere
> > you grab it?  Or, even better, perhaps do something like:
> >
> > static inline struct flush_tlb_info *get_flush_tlb_info(void)
> > {
> >  /* check reentrancy, make sure that we use smp_processor_id() or
> > otherwise assert that we're bound to a single CPU. */
> >  struct flush_tlb_info *ptr = this_cpu_ptr(...);
> >  memset(ptr, 0, sizeof(*ptr));
> >  return ptr;
> > }
> >
> > static inline void put_flush_tlb_info(void)
> > {
> > /* finish checking reentrancy. */
> > }
>
> I’ll check if the compiler is smart enough to avoid redundant assignments,
> and if it is not, I’ll just give all the struct arguments to
> get_flush_tlb_info() instead of memset() if you don’t mind.

Sounds good.

>
> I also want to try parallelizing the remote and local invocations,
> which really annoys me every time I look at the code.

Yes please!


* Re: [PATCH] x86/mm/tlb: Remove flush_tlb_info from the stack
From: Nadav Amit @ 2019-04-25 18:13 UTC
  To: Andy Lutomirski
  Cc: Peter Zijlstra, Borislav Petkov, Ingo Molnar, Thomas Gleixner,
	X86 ML, LKML, Dave Hansen

> On Apr 23, 2019, at 10:23 AM, Andy Lutomirski <luto@kernel.org> wrote:
> 
> On Tue, Apr 23, 2019 at 9:56 AM Nadav Amit <namit@vmware.com> wrote:
>>> On Apr 23, 2019, at 9:50 AM, Andy Lutomirski <luto@kernel.org> wrote:
>>> 
>>> On Tue, Apr 23, 2019 at 12:12 AM Nadav Amit <namit@vmware.com> wrote:
>> https://git.kernel.org/pub/scm/linux/kernel/git/luto/linux.git/log/?h=x86/fixes
>>>> Remove flush_tlb_info variables from the stack. This allows flush_tlb_info
>>>> to be cache-line aligned, avoiding potentially unnecessary cache-line
>>>> movements. It also gives the variables a fixed virtual-to-physical
>>>> translation, which reduces TLB misses.
>>>> 
>>>> Use a per-CPU struct for flush_tlb_mm_range() and
>>>> flush_tlb_kernel_range(). Add debug assertions to ensure there are
>>>> no nested TLB flushes that might overwrite the per-CPU data. For
>>>> arch_tlbbatch_flush(), use a const struct.
>>>> 
>>>> Results when running a microbenchmark that performs 10^6 MADV_DONTNEED
>>>> operations, each followed by touching a page, while 3 additional
>>>> threads run a busy-wait loop (5 runs):
>>> 
>>> Can you add a memset(..., 0, sizeof(struct flush_tlb_info)) everywhere
>>> you grab it?  Or, even better, perhaps do something like:
>>> 
>>> static inline struct flush_tlb_info *get_flush_tlb_info(void)
>>> {
>>> /* check reentrancy, make sure that we use smp_processor_id() or
>>> otherwise assert that we're bound to a single CPU. */
>>> struct flush_tlb_info *ptr = this_cpu_ptr(...);
>>> memset(ptr, 0, sizeof(*ptr));
>>> return ptr;
>>> }
>>> 
>>> static inline void put_flush_tlb_info(void)
>>> {
>>> /* finish checking reentrancy. */
>>> }
>> 
>> I’ll check if the compiler is smart enough to avoid redundant assignments,
>> and if it is not, I’ll just give all the struct arguments to
>> get_flush_tlb_info() instead of memset() if you don’t mind.
> 
> Sounds good.
> 
>> I also want to try parallelizing the remote and local invocations,
>> which really annoys me every time I look at the code.
> 
> Yes please!

I have written some patches, and they do provide a considerable
performance improvement (>10%) for remote TLB flushes. There are still
some issues that need to be resolved, specifically a small slowdown
(~15ns) for local TLB flushes.

Anyhow, based on my past experience, I will do this change in a separate
patch-set after the flush_tlb_info off-stack patch makes it through.

