From: Qian Cai <quic_qiancai@quicinc.com>
To: Vlastimil Babka <vbabka@suse.cz>,
	Andrew Morton <akpm@linux-foundation.org>,
	Christoph Lameter <cl@linux.com>,
	David Rientjes <rientjes@google.com>,
	Pekka Enberg <penberg@kernel.org>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: <linux-mm@kvack.org>, <linux-kernel@vger.kernel.org>,
	Mike Galbraith <efault@gmx.de>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Thomas Gleixner <tglx@linutronix.de>,
	Mel Gorman <mgorman@techsingularity.net>,
	Jesper Dangaard Brouer <brouer@redhat.com>,
	Jann Horn <jannh@google.com>
Subject: Re: [PATCH v4 29/35] mm: slub: Move flush_cpu_slab() invocations __free_slab() invocations out of IRQ context
Date: Mon, 9 Aug 2021 09:41:48 -0400	[thread overview]
Message-ID: <0b36128c-3e12-77df-85fe-a153a714569b@quicinc.com> (raw)
In-Reply-To: <20210805152000.12817-30-vbabka@suse.cz>



On 8/5/2021 11:19 AM, Vlastimil Babka wrote:
> From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> 
> flush_all() flushes a specific SLAB cache on each CPU (where the cache
> is present). The deactivate_slab()/__free_slab() invocations happen
> within an IPI handler, which is problematic for PREEMPT_RT.
> 
> The flush operation is neither frequent nor a hot path, so the per-CPU
> flush can be moved into a workqueue instead.
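
[ Aside for readers following along: a condensed user-space model of the
pattern described above -- start a flush job only for the workers that have
something to flush, then wait for every job that was started. This is an
illustration only, not kernel code; the actual implementation is in the hunk
below, and the worker threads and "skip" flag merely stand in for the
per-CPU work items and has_cpu_slab().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_WORKERS 4

struct flush_job {
	pthread_t thread;
	bool skip;	/* nothing to flush on this "CPU" */
	int cpu;
};

static void *flush_fn(void *arg)
{
	struct flush_job *job = arg;

	/* flush_slab() + unfreeze_partials() would run here, now in
	 * preemptible context instead of an IPI handler */
	printf("flushed cpu %d\n", job->cpu);
	return NULL;
}

static void flush_all_model(void)
{
	static struct flush_job jobs[NR_WORKERS];
	int cpu;

	/* first pass: queue a job only where there is work to do */
	for (cpu = 0; cpu < NR_WORKERS; cpu++) {
		jobs[cpu].cpu = cpu;
		jobs[cpu].skip = (cpu % 2 != 0);  /* stand-in for !has_cpu_slab() */
		if (jobs[cpu].skip)
			continue;
		pthread_create(&jobs[cpu].thread, NULL, flush_fn, &jobs[cpu]);
	}

	/* second pass: wait for every job that was started */
	for (cpu = 0; cpu < NR_WORKERS; cpu++) {
		if (jobs[cpu].skip)
			continue;
		pthread_join(jobs[cpu].thread, NULL);
	}
}

int main(void)
{
	flush_all_model();
	return 0;
}
]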
> 
> [vbabka@suse.cz: adapt to new SLUB changes]
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
>  mm/slub.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++--------
>  1 file changed, 48 insertions(+), 8 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index dceb289cb052..da48ada3d17f 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2513,33 +2513,73 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
>  	unfreeze_partials_cpu(s, c);
>  }
>  
> +struct slub_flush_work {
> +	struct work_struct work;
> +	struct kmem_cache *s;
> +	bool skip;
> +};
> +
>  /*
>   * Flush cpu slab.
>   *
> - * Called from IPI handler with interrupts disabled.
> + * Called from CPU work handler with migration disabled.
>   */
> -static void flush_cpu_slab(void *d)
> +static void flush_cpu_slab(struct work_struct *w)
>  {
> -	struct kmem_cache *s = d;
> -	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
> +	struct kmem_cache *s;
> +	struct kmem_cache_cpu *c;
> +	struct slub_flush_work *sfw;
> +
> +	sfw = container_of(w, struct slub_flush_work, work);
> +
> +	s = sfw->s;
> +	c = this_cpu_ptr(s->cpu_slab);
>  
>  	if (c->page)
> -		flush_slab(s, c, false);
> +		flush_slab(s, c, true);
>  
>  	unfreeze_partials(s);
>  }
>  
> -static bool has_cpu_slab(int cpu, void *info)
> +static bool has_cpu_slab(int cpu, struct kmem_cache *s)
>  {
> -	struct kmem_cache *s = info;
>  	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
>  
>  	return c->page || slub_percpu_partial(c);
>  }
>  
> +static DEFINE_MUTEX(flush_lock);
> +static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
> +
>  static void flush_all(struct kmem_cache *s)
>  {
> -	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
> +	struct slub_flush_work *sfw;
> +	unsigned int cpu;
> +
> +	mutex_lock(&flush_lock);

Vlastimil, taking flush_lock here can trigger a lockdep circular-dependency warning during memory offline/online because of the locking order:

slab_mutex -> flush_lock
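
The full lockdep report is below; to make the inversion concrete first, here
is a condensed user-space illustration (not kernel code). Two pthread mutexes
stand in for slab_mutex and flush_lock, and the intermediate dependencies
lockdep records (cpu_hotplug_lock, pcp_batch_high_lock, (memory_chain).rwsem)
are collapsed into a direct acquisition, since their net effect is that
flush_lock can end up waiting on slab_mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t slab_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;

/* memory offline: slab_memory_callback() holds slab_mutex, then
 * __kmem_cache_shrink() -> flush_all() takes flush_lock */
static void *offline_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&slab_mutex);
	pthread_mutex_lock(&flush_lock);
	puts("offline path: slab_mutex -> flush_lock");
	pthread_mutex_unlock(&flush_lock);
	pthread_mutex_unlock(&slab_mutex);
	return NULL;
}

/* kmem_cache_shrink() from elsewhere (e.g. acpi_os_purge_cache()):
 * flush_all() holds flush_lock and, via cpus_read_lock() and the
 * cpu/memory hotplug notifier paths, ultimately depends on slab_mutex
 * (collapsed here into a direct lock for illustration) */
static void *shrink_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&flush_lock);
	pthread_mutex_lock(&slab_mutex);
	puts("shrink path: flush_lock -> ... -> slab_mutex");
	pthread_mutex_unlock(&slab_mutex);
	pthread_mutex_unlock(&flush_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	/* run the two paths concurrently; with unlucky timing (or under a
	 * lock-order checker) they deadlock exactly as lockdep predicts */
	pthread_create(&a, NULL, offline_path, NULL);
	pthread_create(&b, NULL, shrink_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}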

[   91.374541] WARNING: possible circular locking dependency detected
[   91.381411] 5.14.0-rc5-next-20210809+ #84 Not tainted
[   91.387149] ------------------------------------------------------
[   91.394016] lsbug/1523 is trying to acquire lock:
[   91.399406] ffff800018e76530 (flush_lock){+.+.}-{3:3}, at: flush_all+0x50/0x1c8
[   91.407425] 
               but task is already holding lock:
[   91.414638] ffff800018e48468 (slab_mutex){+.+.}-{3:3}, at: slab_memory_callback+0x44/0x280
[   91.423603] 
               which lock already depends on the new lock.

[   91.433854] 
               the existing dependency chain (in reverse order) is:
[   91.442715] 
               -> #4 (slab_mutex){+.+.}-{3:3}:
[   91.449766]        __lock_acquire+0xb0c/0x1aa8
[   91.454901]        lock_acquire+0x34c/0xb20
[   91.459773]        __mutex_lock+0x194/0x1470
[   91.464732]        mutex_lock_nested+0x6c/0xc0
[   91.469864]        slab_memory_callback+0x44/0x280
[   91.475344]        blocking_notifier_call_chain+0xd0/0x138
[   91.481519]        memory_notify+0x28/0x38
[   91.486304]        offline_pages+0x2cc/0xce4
[   91.491262]        memory_subsys_offline+0xd8/0x280
[   91.496827]        device_offline+0x154/0x1e0
[   91.501872]        online_store+0xa4/0x118
[   91.506656]        dev_attr_store+0x44/0x78
[   91.511527]        sysfs_kf_write+0xe8/0x138
[   91.516485]        kernfs_fop_write_iter+0x26c/0x3d0
[   91.522138]        new_sync_write+0x2bc/0x4f8
[   91.527185]        vfs_write+0x718/0xc88
[   91.531795]        ksys_write+0xf8/0x1e0
[   91.536404]        __arm64_sys_write+0x74/0xa8
[   91.541535]        invoke_syscall.constprop.0+0xdc/0x1d8
[   91.547536]        do_el0_svc+0xe4/0x2a8
[   91.552146]        el0_svc+0x64/0x130
[   91.556498]        el0t_64_sync_handler+0xb0/0xb8
[   91.561889]        el0t_64_sync+0x180/0x184
[   91.566760] 
               -> #3 ((memory_chain).rwsem){++++}-{3:3}:
[   91.574680]        __lock_acquire+0xb0c/0x1aa8
[   91.579814]        lock_acquire+0x34c/0xb20
[   91.584685]        down_read+0xf0/0x488
[   91.589210]        blocking_notifier_call_chain+0x58/0x138
[   91.595383]        memory_notify+0x28/0x38
[   91.600167]        offline_pages+0x2cc/0xce4
[   91.605124]        memory_subsys_offline+0xd8/0x280
[   91.610689]        device_offline+0x154/0x1e0
[   91.615734]        online_store+0xa4/0x118
[   91.620518]        dev_attr_store+0x44/0x78
[   91.625388]        sysfs_kf_write+0xe8/0x138
[   91.630346]        kernfs_fop_write_iter+0x26c/0x3d0
[   91.635997]        new_sync_write+0x2bc/0x4f8
[   91.641043]        vfs_write+0x718/0xc88
[   91.645652]        ksys_write+0xf8/0x1e0
[   91.650262]        __arm64_sys_write+0x74/0xa8
[   91.655393]        invoke_syscall.constprop.0+0xdc/0x1d8
[   91.661394]        do_el0_svc+0xe4/0x2a8
[   91.666004]        el0_svc+0x64/0x130
[   91.670355]        el0t_64_sync_handler+0xb0/0xb8
[   91.675747]        el0t_64_sync+0x180/0x184
[   91.680617] 
               -> #2 (pcp_batch_high_lock){+.+.}-{3:3}:
[   91.688449]        __lock_acquire+0xb0c/0x1aa8
[   91.693582]        lock_acquire+0x34c/0xb20
[   91.698452]        __mutex_lock+0x194/0x1470
[   91.703410]        mutex_lock_nested+0x6c/0xc0
[   91.708541]        zone_pcp_update+0x3c/0x68
[   91.713500]        page_alloc_cpu_online+0x64/0x90
[   91.718978]        cpuhp_invoke_callback+0x588/0x2ba8
[   91.724718]        cpuhp_invoke_callback_range+0xa4/0x108
[   91.730804]        cpu_up+0x598/0xb78
[   91.735154]        bringup_nonboot_cpus+0x110/0x168
[   91.740719]        smp_init+0x4c/0xe0
[   91.745070]        kernel_init_freeable+0x554/0x7c8
[   91.750637]        kernel_init+0x2c/0x140
[   91.755334]        ret_from_fork+0x10/0x20
[   91.760118] 
               -> #1 (cpu_hotplug_lock){++++}-{0:0}:
[   91.767688]        __lock_acquire+0xb0c/0x1aa8
[   91.772820]        lock_acquire+0x34c/0xb20
[   91.777691]        cpus_read_lock+0x98/0x308
[   91.782649]        flush_all+0x54/0x1c8
[   91.787173]        __kmem_cache_shrink+0x38/0x2f0
[   91.792566]        kmem_cache_shrink+0x28/0x38
[   91.797699]        acpi_os_purge_cache+0x18/0x28
[   91.803006]        acpi_purge_cached_objects+0x44/0xdc
[   91.808832]        acpi_initialize_objects+0x24/0x88
[   91.814487]        acpi_bus_init+0xe0/0x47c
[   91.819357]        acpi_init+0x130/0x27c
[   91.823967]        do_one_initcall+0x180/0xbe8
[   91.829098]        kernel_init_freeable+0x710/0x7c8
[   91.834663]        kernel_init+0x2c/0x140
[   91.839360]        ret_from_fork+0x10/0x20
[   91.844143] 
               -> #0 (flush_lock){+.+.}-{3:3}:
[   91.851193]        check_prev_add+0x194/0x1170
[   91.856326]        validate_chain+0xfe8/0x1c20
[   91.861458]        __lock_acquire+0xb0c/0x1aa8
[   91.866589]        lock_acquire+0x34c/0xb20
[   91.871460]        __mutex_lock+0x194/0x1470
[   91.876418]        mutex_lock_nested+0x6c/0xc0
[   91.881549]        flush_all+0x50/0x1c8
[   91.886072]        __kmem_cache_shrink+0x38/0x2f0
[   91.891465]        slab_memory_callback+0x68/0x280
[   91.896943]        blocking_notifier_call_chain+0xd0/0x138
[   91.903117]        memory_notify+0x28/0x38
[   91.907901]        offline_pages+0x2cc/0xce4
[   91.912859]        memory_subsys_offline+0xd8/0x280
[   91.918424]        device_offline+0x154/0x1e0
[   91.923470]        online_store+0xa4/0x118
[   91.928254]        dev_attr_store+0x44/0x78
[   91.933125]        sysfs_kf_write+0xe8/0x138
[   91.938083]        kernfs_fop_write_iter+0x26c/0x3d0
[   91.943735]        new_sync_write+0x2bc/0x4f8
[   91.948781]        vfs_write+0x718/0xc88
[   91.953391]        ksys_write+0xf8/0x1e0
[   91.958000]        __arm64_sys_write+0x74/0xa8
[   91.963130]        invoke_syscall.constprop.0+0xdc/0x1d8
[   91.969131]        do_el0_svc+0xe4/0x2a8
[   91.973741]        el0_svc+0x64/0x130
[   91.978093]        el0t_64_sync_handler+0xb0/0xb8
[   91.983484]        el0t_64_sync+0x180/0x184
[   91.988354] 
               other info that might help us debug this:

[   91.998431] Chain exists of:
                 flush_lock --> (memory_chain).rwsem --> slab_mutex

[   92.010867]  Possible unsafe locking scenario:

[   92.018166]        CPU0                    CPU1
[   92.023380]        ----                    ----
[   92.028595]   lock(slab_mutex);
[   92.032425]                                lock((memory_chain).rwsem);
[   92.039641]                                lock(slab_mutex);
[   92.045989]   lock(flush_lock);
[   92.049819] 
                *** DEADLOCK ***

[   92.057811] 10 locks held by lsbug/1523:
[   92.062420]  #0: ffff0000505a8430 (sb_writers#6){.+.+}-{0:0}, at: ksys_write+0xf8/0x1e0
[   92.071128]  #1: ffff000870f99e88 (&of->mutex){+.+.}-{3:3}, at: kernfs_fop_write_iter+0x1dc/0x3d0
[   92.080701]  #2: ffff0000145b2ab8 (kn->active#175){.+.+}-{0:0}, at: kernfs_fop_write_iter+0x1f8/0x3d0
[   92.090623]  #3: ffff800018f84f08 (device_hotplug_lock){+.+.}-{3:3}, at: lock_device_hotplug_sysfs+0x24/0x88
[   92.101151]  #4: ffff0000145e9190 (&dev->mutex){....}-{3:3}, at: device_offline+0xa0/0x1e0
[   92.110115]  #5: ffff800011d26450 (cpu_hotplug_lock){++++}-{0:0}, at: offline_pages+0x10c/0xce4
[   92.119514]  #6: ffff800018e60570 (mem_hotplug_lock){++++}-{0:0}, at: offline_pages+0x11c/0xce4
[   92.128919]  #7: ffff800018e5bb68 (pcp_batch_high_lock){+.+.}-{3:3}, at: zone_pcp_disable+0x30/0x60
[   92.138668]  #8: ffff800018fa0610 ((memory_chain).rwsem){++++}-{3:3}, at: blocking_notifier_call_chain+0x58/0x138
[   92.149633]  #9: ffff800018e48468 (slab_mutex){+.+.}-{3:3}, at: slab_memory_callback+0x44/0x280
[   92.159033] 
               stack backtrace:
[   92.164772] CPU: 29 PID: 1523 Comm: lsbug Not tainted 5.14.0-rc5-next-20210809+ #84
[   92.173116] Hardware name: MiTAC RAPTOR EV-883832-X3-0001/RAPTOR, BIOS 1.6 06/28/2020
[   92.181631] Call trace:
[   92.184763]  dump_backtrace+0x0/0x3b8
[   92.189115]  show_stack+0x20/0x30
[   92.193118]  dump_stack_lvl+0x8c/0xb8
[   92.197469]  dump_stack+0x1c/0x38
[   92.201472]  print_circular_bug.isra.0+0x530/0x540
[   92.206953]  check_noncircular+0x27c/0x2f0
[   92.211738]  check_prev_add+0x194/0x1170
[   92.216349]  validate_chain+0xfe8/0x1c20
[   92.220961]  __lock_acquire+0xb0c/0x1aa8
[   92.225571]  lock_acquire+0x34c/0xb20
[   92.229921]  __mutex_lock+0x194/0x1470
[   92.234358]  mutex_lock_nested+0x6c/0xc0
[   92.238968]  flush_all+0x50/0x1c8
flush_all at /usr/src/linux-next/mm/slub.c:2649
[   92.242971]  __kmem_cache_shrink+0x38/0x2f0
[   92.247842]  slab_memory_callback+0x68/0x280
slab_mem_going_offline_callback at /usr/src/linux-next/mm/slub.c:4586
(inlined by) slab_memory_callback at /usr/src/linux-next/mm/slub.c:4678
[   92.252800]  blocking_notifier_call_chain+0xd0/0x138
notifier_call_chain at /usr/src/linux-next/kernel/notifier.c:83
(inlined by) blocking_notifier_call_chain at /usr/src/linux-next/kernel/notifier.c:337
(inlined by) blocking_notifier_call_chain at /usr/src/linux-next/kernel/notifier.c:325
[   92.258453]  memory_notify+0x28/0x38
[   92.262717]  offline_pages+0x2cc/0xce4
[   92.267153]  memory_subsys_offline+0xd8/0x280
[   92.272198]  device_offline+0x154/0x1e0
[   92.276723]  online_store+0xa4/0x118
[   92.280986]  dev_attr_store+0x44/0x78
[   92.285336]  sysfs_kf_write+0xe8/0x138
[   92.289774]  kernfs_fop_write_iter+0x26c/0x3d0
[   92.294906]  new_sync_write+0x2bc/0x4f8
[   92.299431]  vfs_write+0x718/0xc88
[   92.303520]  ksys_write+0xf8/0x1e0
[   92.307608]  __arm64_sys_write+0x74/0xa8
[   92.312219]  invoke_syscall.constprop.0+0xdc/0x1d8
[   92.317698]  do_el0_svc+0xe4/0x2a8
[   92.321789]  el0_svc+0x64/0x130
[   92.325619]  el0t_64_sync_handler+0xb0/0xb8
[   92.330489]  el0t_64_sync+0x180/0x184

> +	cpus_read_lock();
> +
> +	for_each_online_cpu(cpu) {
> +		sfw = &per_cpu(slub_flush, cpu);
> +		if (!has_cpu_slab(cpu, s)) {
> +			sfw->skip = true;
> +			continue;
> +		}
> +		INIT_WORK(&sfw->work, flush_cpu_slab);
> +		sfw->skip = false;
> +		sfw->s = s;
> +		schedule_work_on(cpu, &sfw->work);
> +	}
> +
> +	for_each_online_cpu(cpu) {
> +		sfw = &per_cpu(slub_flush, cpu);
> +		if (sfw->skip)
> +			continue;
> +		flush_work(&sfw->work);
> +	}
> +
> +	cpus_read_unlock();
> +	mutex_unlock(&flush_lock);
>  }
>  
>  /*
> 

Thread overview: 80+ messages
2021-08-05 15:19 [PATCH v4 00/35] SLUB: reduce irq disabled scope and make it RT compatible Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 01/35] mm, slub: don't call flush_all() from slab_debug_trace_open() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 02/35] mm, slub: allocate private object map for debugfs listings Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 03/35] mm, slub: allocate private object map for validate_slab_cache() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 04/35] mm, slub: don't disable irq for debug_check_no_locks_freed() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 05/35] mm, slub: remove redundant unfreeze_partials() from put_cpu_partial() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 06/35] mm, slub: unify cmpxchg_double_slab() and __cmpxchg_double_slab() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 07/35] mm, slub: extract get_partial() from new_slab_objects() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 08/35] mm, slub: dissolve new_slab_objects() into ___slab_alloc() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 09/35] mm, slub: return slab page from get_partial() and set c->page afterwards Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 10/35] mm, slub: restructure new page checks in ___slab_alloc() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 11/35] mm, slub: simplify kmem_cache_cpu and tid setup Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 12/35] mm, slub: move disabling/enabling irqs to ___slab_alloc() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 13/35] mm, slub: do initial checks in ___slab_alloc() with irqs enabled Vlastimil Babka
2021-08-15 10:14   ` Vlastimil Babka
2021-08-15 10:22     ` Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 14/35] mm, slub: move disabling irqs closer to get_partial() in ___slab_alloc() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 15/35] mm, slub: restore irqs around calling new_slab() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 16/35] mm, slub: validate slab from partial list or page allocator before making it cpu slab Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 17/35] mm, slub: check new pages with restored irqs Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 18/35] mm, slub: stop disabling irqs around get_partial() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 19/35] mm, slub: move reset of c->page and freelist out of deactivate_slab() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 20/35] mm, slub: make locking in deactivate_slab() irq-safe Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 21/35] mm, slub: call deactivate_slab() without disabling irqs Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 22/35] mm, slub: move irq control into unfreeze_partials() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 23/35] mm, slub: discard slabs in unfreeze_partials() without irqs disabled Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 24/35] mm, slub: detach whole partial list at once in unfreeze_partials() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 25/35] mm, slub: separate detaching of partial list in unfreeze_partials() from unfreezing Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 26/35] mm, slub: only disable irq with spin_lock in __unfreeze_partials() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 27/35] mm, slub: don't disable irqs in slub_cpu_dead() Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 28/35] mm, slab: make flush_slab() possible to call with irqs enabled Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 29/35] mm: slub: Move flush_cpu_slab() invocations __free_slab() invocations out of IRQ context Vlastimil Babka
2021-08-09 13:41   ` Qian Cai [this message]
2021-08-09 18:44     ` Mike Galbraith
2021-08-09 18:44       ` Mike Galbraith
2021-08-09 20:08       ` Vlastimil Babka
2021-08-09 22:13         ` Qian Cai
2021-08-10  1:07         ` Mike Galbraith
2021-08-10  1:07           ` Mike Galbraith
2021-08-10  9:03     ` Vlastimil Babka
2021-08-10 11:47       ` Mike Galbraith
2021-08-10 11:47         ` Mike Galbraith
2021-08-10 20:31         ` Paul E. McKenney
2021-08-10 22:36           ` Vlastimil Babka
2021-08-10 23:53             ` Paul E. McKenney
2021-08-11 14:17               ` Paul E. McKenney
2021-08-10 20:25       ` Paul E. McKenney
2021-08-10 14:33     ` Vlastimil Babka
2021-08-11  1:42       ` Qian Cai
2021-08-11  8:55       ` Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 30/35] mm: slub: Make object_map_lock a raw_spinlock_t Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 31/35] mm, slub: optionally save/restore irqs in slab_[un]lock()/ Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 32/35] mm, slub: make slab_lock() disable irqs with PREEMPT_RT Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 33/35] mm, slub: protect put_cpu_partial() with disabled irqs instead of cmpxchg Vlastimil Babka
2021-08-05 15:19 ` [PATCH v4 34/35] mm, slub: use migrate_disable() on PREEMPT_RT Vlastimil Babka
2021-08-05 15:20 ` [PATCH v4 35/35] mm, slub: convert kmem_cpu_slab protection to local_lock Vlastimil Babka
2021-08-15 12:27   ` Sven Eckelmann
2021-08-17  8:37     ` Vlastimil Babka
2021-08-17  9:12       ` Sebastian Andrzej Siewior
2021-08-17  9:17         ` Vlastimil Babka
2021-08-17  9:31           ` Sebastian Andrzej Siewior
2021-08-17  9:31         ` Vlastimil Babka
2021-08-17  9:34           ` Sebastian Andrzej Siewior
2021-08-17  9:13     ` Vlastimil Babka
2021-08-17 10:14   ` Vlastimil Babka
2021-08-17 19:53     ` Andrew Morton
2021-08-18 11:52       ` Vlastimil Babka
2021-08-23 20:36         ` Thomas Gleixner
2021-08-17 15:39   ` Sebastian Andrzej Siewior
2021-08-17 15:41     ` Vlastimil Babka
2021-08-17 15:49       ` Sebastian Andrzej Siewior
2021-08-17 15:56   ` Vlastimil Babka
2021-08-05 16:42 ` [PATCH v4 00/35] SLUB: reduce irq disabled scope and make it RT compatible Sebastian Andrzej Siewior
2021-08-06  5:14   ` Mike Galbraith
2021-08-06  5:14     ` Mike Galbraith
2021-08-06  7:45     ` Vlastimil Babka
2021-08-10 14:36 ` Vlastimil Babka
2021-08-15 10:18   ` Vlastimil Babka
2021-08-17 10:23     ` Vlastimil Babka
2021-08-17 15:59       ` Vlastimil Babka
