linux-kernel.vger.kernel.org archive mirror
* [PATCH v2 0/2] blk-iocost: add refcounting for iocg and ioc
@ 2022-12-27 12:55 Yu Kuai
  2022-12-27 12:55 ` [PATCH v2 1/2] blk-iocost: add refcounting for iocg Yu Kuai
  2022-12-27 12:55 ` [PATCH v2 2/2] blk-iocost: add refcounting for ioc Yu Kuai
  0 siblings, 2 replies; 22+ messages in thread
From: Yu Kuai @ 2022-12-27 12:55 UTC (permalink / raw)
  To: tj, hch, josef, axboe
  Cc: cgroups, linux-block, linux-kernel, yukuai3, yukuai1, yi.zhang

From: Yu Kuai <yukuai3@huawei.com>

Changes in v2:
 - Instead of adding ioc_pd_offline() and moving the iocg operations from
   ioc_pd_free() to ioc_pd_offline(), use refcounting to fix the problem.

Yu Kuai (2):
  blk-iocost: add refcounting for iocg
  blk-iocost: add refcounting for ioc

 block/blk-iocost.c | 86 ++++++++++++++++++++++++++++++++--------------
 1 file changed, 61 insertions(+), 25 deletions(-)

-- 
2.31.1



* [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2022-12-27 12:55 [PATCH v2 0/2] blk-iocost: add refcounting for iocg and ioc Yu Kuai
@ 2022-12-27 12:55 ` Yu Kuai
  2023-01-04 21:44   ` Tejun Heo
  2022-12-27 12:55 ` [PATCH v2 2/2] blk-iocost: add refcounting for ioc Yu Kuai
  1 sibling, 1 reply; 22+ messages in thread
From: Yu Kuai @ 2022-12-27 12:55 UTC (permalink / raw)
  To: tj, hch, josef, axboe
  Cc: cgroups, linux-block, linux-kernel, yukuai3, yukuai1, yi.zhang

From: Yu Kuai <yukuai3@huawei.com>

iocost requires that a child iocg exits before its parent iocg, otherwise
the kernel might crash in ioc_timer_fn(). However, iocg is currently
exited in pd_free_fn(), which can't guarantee that order:

1) removing a cgroup can run concurrently with deactivating the policy;
2) blkg_free() triggered by removing a cgroup runs asynchronously, so
removing a child cgroup can run concurrently with removing its parent
cgroup.

Fix the problem by adding refcounting for iocg: a child iocg grabs a
reference on its parent iocg, so the parent iocg is not freed until all
of its children have exited.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/blk-iocost.c | 74 +++++++++++++++++++++++++++++++---------------
 1 file changed, 50 insertions(+), 24 deletions(-)

diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 7a0d754b9eb2..525e93e1175a 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -461,6 +461,8 @@ struct ioc_gq {
 	struct blkg_policy_data		pd;
 	struct ioc			*ioc;
 
+	refcount_t			ref;
+
 	/*
 	 * A iocg can get its weight from two sources - an explicit
 	 * per-device-cgroup configuration or the default weight of the
@@ -2943,9 +2945,53 @@ static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
 		return NULL;
 	}
 
+	refcount_set(&iocg->ref, 1);
 	return &iocg->pd;
 }
 
+static void iocg_get(struct ioc_gq *iocg)
+{
+	refcount_inc(&iocg->ref);
+}
+
+static void iocg_put(struct ioc_gq *iocg)
+{
+	struct ioc *ioc = iocg->ioc;
+	unsigned long flags;
+	struct ioc_gq *parent = NULL;
+
+	if (!refcount_dec_and_test(&iocg->ref))
+		return;
+
+	if (iocg->level > 0)
+		parent = iocg->ancestors[iocg->level - 1];
+
+	if (ioc) {
+		spin_lock_irqsave(&ioc->lock, flags);
+
+		if (!list_empty(&iocg->active_list)) {
+			struct ioc_now now;
+
+			ioc_now(ioc, &now);
+			propagate_weights(iocg, 0, 0, false, &now);
+			list_del_init(&iocg->active_list);
+		}
+
+		WARN_ON_ONCE(!list_empty(&iocg->walk_list));
+		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
+
+		spin_unlock_irqrestore(&ioc->lock, flags);
+
+		hrtimer_cancel(&iocg->waitq_timer);
+	}
+
+	free_percpu(iocg->pcpu_stat);
+	kfree(iocg);
+
+	if (parent)
+		iocg_put(parent);
+}
+
 static void ioc_pd_init(struct blkg_policy_data *pd)
 {
 	struct ioc_gq *iocg = pd_to_iocg(pd);
@@ -2973,6 +3019,9 @@ static void ioc_pd_init(struct blkg_policy_data *pd)
 
 	iocg->level = blkg->blkcg->css.cgroup->level;
 
+	if (blkg->parent)
+		iocg_get(blkg_to_iocg(blkg->parent));
+
 	for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
 		struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
 		iocg->ancestors[tiocg->level] = tiocg;
@@ -2985,30 +3034,7 @@ static void ioc_pd_init(struct blkg_policy_data *pd)
 
 static void ioc_pd_free(struct blkg_policy_data *pd)
 {
-	struct ioc_gq *iocg = pd_to_iocg(pd);
-	struct ioc *ioc = iocg->ioc;
-	unsigned long flags;
-
-	if (ioc) {
-		spin_lock_irqsave(&ioc->lock, flags);
-
-		if (!list_empty(&iocg->active_list)) {
-			struct ioc_now now;
-
-			ioc_now(ioc, &now);
-			propagate_weights(iocg, 0, 0, false, &now);
-			list_del_init(&iocg->active_list);
-		}
-
-		WARN_ON_ONCE(!list_empty(&iocg->walk_list));
-		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
-
-		spin_unlock_irqrestore(&ioc->lock, flags);
-
-		hrtimer_cancel(&iocg->waitq_timer);
-	}
-	free_percpu(iocg->pcpu_stat);
-	kfree(iocg);
+	iocg_put(pd_to_iocg(pd));
 }
 
 static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
-- 
2.31.1



* [PATCH v2 2/2] blk-iocost: add refcounting for ioc
  2022-12-27 12:55 [PATCH v2 0/2] blk-iocost: add refcounting for iocg and ioc Yu Kuai
  2022-12-27 12:55 ` [PATCH v2 1/2] blk-iocost: add refcounting for iocg Yu Kuai
@ 2022-12-27 12:55 ` Yu Kuai
  2023-01-04 21:45   ` Tejun Heo
  1 sibling, 1 reply; 22+ messages in thread
From: Yu Kuai @ 2022-12-27 12:55 UTC (permalink / raw)
  To: tj, hch, josef, axboe
  Cc: cgroups, linux-block, linux-kernel, yukuai3, yukuai1, yi.zhang

From: Yu Kuai <yukuai3@huawei.com>

Our test found the following problem in kernel 5.10, and the same problem
should exist in mainline:

BUG: KASAN: use-after-free in _raw_spin_lock_irqsave+0x71/0xe0
Write of size 4 at addr ffff8881432000e0 by task swapper/4/0
...
Call Trace:
 <IRQ>
 dump_stack+0x9c/0xd3
 print_address_description.constprop.0+0x19/0x170
 __kasan_report.cold+0x6c/0x84
 kasan_report+0x3a/0x50
 check_memory_region+0xfd/0x1f0
 _raw_spin_lock_irqsave+0x71/0xe0
 ioc_pd_free+0x9d/0x250
 blkg_free.part.0+0x80/0x100
 __blkg_release+0xf3/0x1c0
 rcu_do_batch+0x292/0x700
 rcu_core+0x270/0x2d0
 __do_softirq+0xfd/0x402
  </IRQ>
 asm_call_irq_on_stack+0x12/0x20
 do_softirq_own_stack+0x37/0x50
 irq_exit_rcu+0x134/0x1a0
 sysvec_apic_timer_interrupt+0x36/0x80
 asm_sysvec_apic_timer_interrupt+0x12/0x20

 Freed by task 57:
 kfree+0xba/0x680
 rq_qos_exit+0x5a/0x80
 blk_cleanup_queue+0xce/0x1a0
 virtblk_remove+0x77/0x130 [virtio_blk]
 virtio_dev_remove+0x56/0xe0
 __device_release_driver+0x2ba/0x450
 device_release_driver+0x29/0x40
 bus_remove_device+0x1d8/0x2c0
 device_del+0x333/0x7e0
 device_unregister+0x27/0x90
 unregister_virtio_device+0x22/0x40
 virtio_pci_remove+0x53/0xb0
 pci_device_remove+0x7a/0x130
 __device_release_driver+0x2ba/0x450
 device_release_driver+0x29/0x40
 pci_stop_bus_device+0xcf/0x100
 pci_stop_and_remove_bus_device+0x16/0x20
 disable_slot+0xa1/0x110
 acpiphp_disable_and_eject_slot+0x35/0xe0
 hotplug_event+0x1b8/0x3c0
 acpiphp_hotplug_notify+0x37/0x70
 acpi_device_hotplug+0xee/0x320
 acpi_hotplug_work_fn+0x69/0x80
 process_one_work+0x3c5/0x730
 worker_thread+0x93/0x650
 kthread+0x1ba/0x210
 ret_from_fork+0x22/0x30

The root cause is that blkg_free() can run asynchronously, so it can race
with device removal:

T1			T2		T3
//delete device
del_gendisk
 bdi_unregister
  bdi_remove_from_list
   synchronize_rcu_expedited

			//rmdir cgroup
			blkcg_destroy_blkgs
			 blkg_destroy
			  percpu_ref_kill
			   blkg_release
			    call_rcu
 rq_qos_exit
  ioc_rqos_exit
   kfree(ioc)
					__blkg_release
					 blkg_free
					  blkg_free_workfn
					   pd_free_fn
					    ioc_pd_free
					     spin_lock_irqsave

Fix the problem by adding refcounting for ioc: each iocg grabs a reference
on the ioc, so the ioc won't be freed until all iocgs have exited.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/blk-iocost.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 525e93e1175a..d168d3f5f78e 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -404,6 +404,7 @@ struct ioc_pcpu_stat {
 struct ioc {
 	struct rq_qos			rqos;
 
+	refcount_t			ref;
 	bool				enabled;
 
 	struct ioc_params		params;
@@ -2816,6 +2817,12 @@ static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
 	spin_unlock_irq(&ioc->lock);
 }
 
+static void ioc_put(struct ioc *ioc)
+{
+	if (refcount_dec_and_test(&ioc->ref))
+		kfree(ioc);
+}
+
 static void ioc_rqos_exit(struct rq_qos *rqos)
 {
 	struct ioc *ioc = rqos_to_ioc(rqos);
@@ -2828,7 +2835,7 @@ static void ioc_rqos_exit(struct rq_qos *rqos)
 
 	del_timer_sync(&ioc->timer);
 	free_percpu(ioc->pcpu_stat);
-	kfree(ioc);
+	ioc_put(ioc);
 }
 
 static struct rq_qos_ops ioc_rqos_ops = {
@@ -2883,6 +2890,7 @@ static int blk_iocost_init(struct gendisk *disk)
 	ioc->period_at = ktime_to_us(ktime_get());
 	atomic64_set(&ioc->cur_period, 0);
 	atomic_set(&ioc->hweight_gen, 0);
+	refcount_set(&ioc->ref, 1);
 
 	spin_lock_irq(&ioc->lock);
 	ioc->autop_idx = AUTOP_INVALID;
@@ -2983,6 +2991,7 @@ static void iocg_put(struct ioc_gq *iocg)
 		spin_unlock_irqrestore(&ioc->lock, flags);
 
 		hrtimer_cancel(&iocg->waitq_timer);
+		ioc_put(ioc);
 	}
 
 	free_percpu(iocg->pcpu_stat);
@@ -3004,6 +3013,7 @@ static void ioc_pd_init(struct blkg_policy_data *pd)
 	ioc_now(ioc, &now);
 
 	iocg->ioc = ioc;
+	refcount_inc(&ioc->ref);
 	atomic64_set(&iocg->vtime, now.vnow);
 	atomic64_set(&iocg->done_vtime, now.vnow);
 	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
-- 
2.31.1



* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2022-12-27 12:55 ` [PATCH v2 1/2] blk-iocost: add refcounting for iocg Yu Kuai
@ 2023-01-04 21:44   ` Tejun Heo
  2023-01-05  1:14     ` Yu Kuai
  0 siblings, 1 reply; 22+ messages in thread
From: Tejun Heo @ 2023-01-04 21:44 UTC (permalink / raw)
  To: Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yukuai3, yi.zhang

On Tue, Dec 27, 2022 at 08:55:01PM +0800, Yu Kuai wrote:
> From: Yu Kuai <yukuai3@huawei.com>
> 
> iocost requires that child iocg must exit before parent iocg, otherwise
> kernel might crash in ioc_timer_fn(). However, currently iocg is exited
> in pd_free_fn(), which can't guarantee such order:
> 
> 1) remove cgroup can concurrent with deactivate policy;
> 2) blkg_free() triggered by remove cgroup is asynchronously, remove
> child cgroup can concurrent with remove parent cgroup;
> 
> Fix the problem by add refcounting for iocg, and child iocg will grab
> reference of parent iocg, so that parent iocg will wait for all child
> iocg to be exited.

Wouldn't it be better to do this refcnting in the blk-cgroup core code
rather than in blk-iocost?

Thanks.

-- 
tejun


* Re: [PATCH v2 2/2] blk-iocost: add refcounting for ioc
  2022-12-27 12:55 ` [PATCH v2 2/2] blk-iocost: add refcounting for ioc Yu Kuai
@ 2023-01-04 21:45   ` Tejun Heo
  0 siblings, 0 replies; 22+ messages in thread
From: Tejun Heo @ 2023-01-04 21:45 UTC (permalink / raw)
  To: Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yukuai3, yi.zhang

On Tue, Dec 27, 2022 at 08:55:02PM +0800, Yu Kuai wrote:
> Root cause is that blkg_free() can be asynchronously, and it can race
> with delete device:
> 
> T1			T2		T3
> //delete device
> del_gendisk
>  bdi_unregister
>   bdi_remove_from_list
>    synchronize_rcu_expedited
> 
> 			//rmdir cgroup
> 			blkcg_destroy_blkgs
> 			 blkg_destroy
> 			  percpu_ref_kill
> 			   blkg_release
> 			    call_rcu
>  rq_qos_exit
>   ioc_rqos_exit
>    kfree(ioc)
> 					__blkg_release
> 					 blkg_free
> 					  blkg_free_workfn
> 					   pd_free_fn
> 					    ioc_pd_free
> 					     spin_lock_irqsave
> 
> Fix the problem by add refcounting for ioc, and iocg will grab reference
> of ioc, so that ioc won't be freed until all the iocg is exited.

Ditto, why do this in iocost instead of blk-cgroup core?

Thanks.

-- 
tejun


* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-04 21:44   ` Tejun Heo
@ 2023-01-05  1:14     ` Yu Kuai
  2023-01-05 18:32       ` Tejun Heo
  0 siblings, 1 reply; 22+ messages in thread
From: Yu Kuai @ 2023-01-05  1:14 UTC (permalink / raw)
  To: Tejun Heo, Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hi,

On 2023/01/05 5:44, Tejun Heo wrote:
> On Tue, Dec 27, 2022 at 08:55:01PM +0800, Yu Kuai wrote:
>> From: Yu Kuai <yukuai3@huawei.com>
>>
>> iocost requires that child iocg must exit before parent iocg, otherwise
>> kernel might crash in ioc_timer_fn(). However, currently iocg is exited
>> in pd_free_fn(), which can't guarantee such order:
>>
>> 1) remove cgroup can concurrent with deactivate policy;
>> 2) blkg_free() triggered by remove cgroup is asynchronously, remove
>> child cgroup can concurrent with remove parent cgroup;
>>
>> Fix the problem by add refcounting for iocg, and child iocg will grab
>> reference of parent iocg, so that parent iocg will wait for all child
>> iocg to be exited.
> 
> Wouldn't it be better to do this refcnting in the blk-cgroup core code
> rather than in blk-iocost?
> 

The problem is that I can't find a proper way to fix the race where
pd_free_fn() can be called from two different contexts:

1) from blkg_free(), which is called asynchronously when a cgroup is
removed;
2) from blkcg_deactivate_policy(), which is called when a device is
removed.

1) is tied to the blkg while 2) is not, hence refcnting from the blkg
can't fix the problem. Refcnting from blkcg_policy_data should be ok, but
I see that bfq already has similar refcnting while the other policies
don't require it.

Any suggestions?

Thanks,
Kuai
> Thanks.
> 



* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-05  1:14     ` Yu Kuai
@ 2023-01-05 18:32       ` Tejun Heo
  2023-01-06  1:08         ` Yu Kuai
  0 siblings, 1 reply; 22+ messages in thread
From: Tejun Heo @ 2023-01-05 18:32 UTC (permalink / raw)
  To: Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

On Thu, Jan 05, 2023 at 09:14:07AM +0800, Yu Kuai wrote:
> 1) is related to blkg, while 2) is not, hence refcnting from blkg can't
> fix the problem. refcnting from blkcg_policy_data should be ok, but I
> see that bfq already has the similar refcnting, while other policy
> doesn't require such refcnting.

Hmm... taking a step back, wouldn't this be solved by moving the first part
of ioc_pd_free() to pd_offline_fn()? The ordering is strictly defined there,
right?
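
IOW, something like the below, completely untested and only a sketch (it
just lifts the active_list teardown out of ioc_pd_free(); the percpu and
kfree part would stay where it is), wired up as the policy's
.pd_offline_fn callback:

static void ioc_pd_offline(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct ioc *ioc = iocg->ioc;
	unsigned long flags;

	if (!ioc)
		return;

	spin_lock_irqsave(&ioc->lock, flags);

	/* deactivate the iocg and drop its weight contribution */
	if (!list_empty(&iocg->active_list)) {
		struct ioc_now now;

		ioc_now(ioc, &now);
		propagate_weights(iocg, 0, 0, false, &now);
		list_del_init(&iocg->active_list);
	}

	spin_unlock_irqrestore(&ioc->lock, flags);
}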

Thanks.

--
tejun


* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-05 18:32       ` Tejun Heo
@ 2023-01-06  1:08         ` Yu Kuai
  2023-01-06 20:18           ` Tejun Heo
  0 siblings, 1 reply; 22+ messages in thread
From: Yu Kuai @ 2023-01-06  1:08 UTC (permalink / raw)
  To: Tejun Heo, Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hi,

On 2023/01/06 2:32, Tejun Heo wrote:
> On Thu, Jan 05, 2023 at 09:14:07AM +0800, Yu Kuai wrote:
>> 1) is related to blkg, while 2) is not, hence refcnting from blkg can't
>> fix the problem. refcnting from blkcg_policy_data should be ok, but I
>> see that bfq already has the similar refcnting, while other policy
>> doesn't require such refcnting.
> 
> Hmm... taking a step back, wouldn't this be solved by moving the first part
> of ioc_pd_free() to pd_offline_fn()? The ordering is strictly defined there,
> right?
> 

Moving the first part to pd_offline_fn() has some requirements, like what
I did in the other thread:

The iocg can be activated again after pd_offline_fn(), which is possible
because bios can still be dispatched while the cgroup is being removed. I
tried to avoid that by:

1) dispatching all throttled bios in ioc_pd_offline();
2) not throttling bios after ioc_pd_offline().

However, you already disagreed with that. 😔

Thanks,
Kuai

> Thanks.
> 
> --
> tejun
> .
> 



* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-06  1:08         ` Yu Kuai
@ 2023-01-06 20:18           ` Tejun Heo
  2023-01-09  1:32             ` Yu Kuai
  0 siblings, 1 reply; 22+ messages in thread
From: Tejun Heo @ 2023-01-06 20:18 UTC (permalink / raw)
  To: Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

On Fri, Jan 06, 2023 at 09:08:45AM +0800, Yu Kuai wrote:
> Hi,
> 
> On 2023/01/06 2:32, Tejun Heo wrote:
> > On Thu, Jan 05, 2023 at 09:14:07AM +0800, Yu Kuai wrote:
> > > 1) is related to blkg, while 2) is not, hence refcnting from blkg can't
> > > fix the problem. refcnting from blkcg_policy_data should be ok, but I
> > > see that bfq already has the similar refcnting, while other policy
> > > doesn't require such refcnting.
> > 
> > Hmm... taking a step back, wouldn't this be solved by moving the first part
> > of ioc_pd_free() to pd_offline_fn()? The ordering is strictly defined there,
> > right?
> > 
> 
> Moving first part to pd_offline_fn() has some requirements, like what I
> did in the other thread:
> 
> iocg can be activated again after pd_offline_fn(), which is possible
> because bio can be dispatched when cgroup is removed. I tried to avoid
> that by:
> 
> 1) dispatch all throttled bio io ioc_pd_offline()
> 2) don't throttle bio after ioc_pd_offline()
> 
> However, you already disagreed with that. 😔

Okay, I was completely wrong while I was replying to your original patch.
Should have looked at the code closer, my apologies.

What I missed is that pd_offline doesn't happen when the cgroup goes
offline. Please take a look at the following two commits:

 59b57717fff8 ("blkcg: delay blkg destruction until after writeback has finished")
 d866dbf61787 ("blkcg: rename blkcg->cgwb_refcnt to ->online_pin and always use it")

After the above two commits, ->pd_offline_fn() is called only after all
possible writebacks are complete, so it shouldn't allow mass escapes to
root. With writebacks out of the picture, it might be that there can be no
further IOs once ->pd_offline_fn() is called too as there can be no tasks
left in it and no dirty pages, but best to confirm that.

So, yeah, the original approach you took should work although I'm not sure
the patches you added to make offline blkgs bypass are necessary
(that also contributed to my assumption that there would be more IOs on those
blkg's). Have you seen more IOs coming down the pipeline after offline? If
so, can you dump some backtraces and see where they're coming from?

Thanks.

-- 
tejun


* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-06 20:18           ` Tejun Heo
@ 2023-01-09  1:32             ` Yu Kuai
  2023-01-09 18:23               ` Tejun Heo
  0 siblings, 1 reply; 22+ messages in thread
From: Yu Kuai @ 2023-01-09  1:32 UTC (permalink / raw)
  To: Tejun Heo, Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hi,

On 2023/01/07 4:18, Tejun Heo wrote:
> On Fri, Jan 06, 2023 at 09:08:45AM +0800, Yu Kuai wrote:
>> Hi,
>>
>> On 2023/01/06 2:32, Tejun Heo wrote:
>>> On Thu, Jan 05, 2023 at 09:14:07AM +0800, Yu Kuai wrote:
>>>> 1) is related to blkg, while 2) is not, hence refcnting from blkg can't
>>>> fix the problem. refcnting from blkcg_policy_data should be ok, but I
>>>> see that bfq already has the similar refcnting, while other policy
>>>> doesn't require such refcnting.
>>>
>>> Hmm... taking a step back, wouldn't this be solved by moving the first part
>>> of ioc_pd_free() to pd_offline_fn()? The ordering is strictly defined there,
>>> right?
>>>
>>
>> Moving first part to pd_offline_fn() has some requirements, like what I
>> did in the other thread:
>>
>> iocg can be activated again after pd_offline_fn(), which is possible
>> because bio can be dispatched when cgroup is removed. I tried to avoid
>> that by:
>>
>> 1) dispatch all throttled bio io ioc_pd_offline()
>> 2) don't throttle bio after ioc_pd_offline()
>>
>> However, you already disagreed with that. 😔
> 
> Okay, I was completely wrong while I was replying to your original patch.
> Should have looked at the code closer, my apologies.
> 
> What I missed is that pd_offline doesn't happen when the cgroup goes
> offline. Please take a look at the following two commits:
> 
>   59b57717fff8 ("blkcg: delay blkg destruction until after writeback has finished")
>   d866dbf61787 ("blkcg: rename blkcg->cgwb_refcnt to ->online_pin and always use it")
> 

These two commits have been applied for three years. I haven't checked the
details yet, but they don't seem to guarantee that no IO will be handled
by rq_qos_throttle() after pd_offline_fn(), because I just reproduced this
while working on another problem:

f02be9002c48 ("block, bfq: fix null pointer dereference in bfq_bio_bfqg()")

A user thread can issue async IO, and that IO can be throttled by
blk-throttle (not writeback); the user thread can then exit and the cgroup
can be removed before such IO is dispatched to rq_qos_throttle().

> After the above two commits, ->pd_offline_fn() is called only after all
> possible writebacks are complete, so it shouldn't allow mass escapes to
> root. With writebacks out of the picture, it might be that there can be no
> further IOs once ->pd_offline_fn() is called too as there can be no tasks
> left in it and no dirty pages, but best to confirm that.
> 
> So, yeah, the original approach you took should work although I'm not sure
> the patches that you added to make offline blkg to bypass are necessary
> (that also contributed to my assumption that there will be more IOs on those
> blkg's). Have you seen more IOs coming down the pipeline after offline? If
> so, can you dump some backtraces and see where they're coming from?

Currently I'm sure such IOs can come from blk-throttle; I also suspect
io_uring can do this, but I'm not sure about that yet.

Thanks,
Kuai



* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-09  1:32             ` Yu Kuai
@ 2023-01-09 18:23               ` Tejun Heo
  2023-01-10  1:39                 ` Yu Kuai
  0 siblings, 1 reply; 22+ messages in thread
From: Tejun Heo @ 2023-01-09 18:23 UTC (permalink / raw)
  To: Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hello,

On Mon, Jan 09, 2023 at 09:32:46AM +0800, Yu Kuai wrote:
> >   59b57717fff8 ("blkcg: delay blkg destruction until after writeback has finished")
> >   d866dbf61787 ("blkcg: rename blkcg->cgwb_refcnt to ->online_pin and always use it")
> 
> These two commits are applied for three years, I don't check the details
> yet but they seem can't guarantee that no io will be handled by
> rq_qos_throttle() after pd_offline_fn(), because I just reproduced this
> in another problem:
> 
> f02be9002c48 ("block, bfq: fix null pointer dereference in bfq_bio_bfqg()")
> 
> User thread can issue async io, and io can be throttled by
> blk-throttle(not writeback), then user thread can exit and cgroup can be
> removed before such io is dispatched to rq_qos_throttle.

I see.

> > After the above two commits, ->pd_offline_fn() is called only after all
> > possible writebacks are complete, so it shouldn't allow mass escapes to
> > root. With writebacks out of the picture, it might be that there can be no
> > further IOs once ->pd_offline_fn() is called too as there can be no tasks
> > left in it and no dirty pages, but best to confirm that.
> > 
> > So, yeah, the original approach you took should work although I'm not sure
> > the patches that you added to make offline blkg to bypass are necessary
> > (that also contributed to my assumption that there will be more IOs on those
> > blkg's). Have you seen more IOs coming down the pipeline after offline? If
> > so, can you dump some backtraces and see where they're coming from?
> 
> Currently I'm sure such IOs can come from blk-throttle, and I'm not sure
> yet but I also suspect io_uring can do this.

Yeah, that's unfortunate. There are several options here:

1. Do what you originally suggested - bypass to root after offline. I feel
   uneasy about this. Both iolatency and throtl clear their configs on
   offline but that's punting to the parent. For iocost it'd be bypassing
   all controls, which can actually be exploited.

2. Make all possible IO issuers use blkcg_[un]pin_online() and shift the
   iocost shutdown to pd_offline_fn(). This likely is the most canonical
   solution given the current situation but it's kinda nasty to add another
   layer of refcnting all over the place.

3. Order blkg free so that parents are never freed before children. You did
   this by adding refcnts in iocost but shouldn't it be possible to simply
   shift blkg_put(blkg->parent) in __blkg_release() to blkg_free_workfn()?

#3 seems the most logical to me. What do you think?
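
For #3, the sketch would be something like the below - completely
untested, and the surrounding blkg_free_workfn() body is written from
memory, so treat the details as approximate. __blkg_release() would keep
dropping the css ref and scheduling free_work as it does now; only the
parent put moves:

static void blkg_free_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     free_work);
	int i;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	/*
	 * Drop the parent ref only after this blkg's pds are freed.
	 * The parent's own free work can then only run afterwards, so
	 * pd_free_fn() is always called child-first.
	 */
	if (blkg->parent)
		blkg_put(blkg->parent);

	/* ... existing percpu / queue teardown and kfree(blkg) ... */
}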

Thanks.

-- 
tejun


* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-09 18:23               ` Tejun Heo
@ 2023-01-10  1:39                 ` Yu Kuai
  2023-01-10 18:36                   ` Tejun Heo
  0 siblings, 1 reply; 22+ messages in thread
From: Yu Kuai @ 2023-01-10  1:39 UTC (permalink / raw)
  To: Tejun Heo, Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hi,

On 2023/01/10 2:23, Tejun Heo wrote:
> Yeah, that's unfortunate. There are several options here:
> 
> 1. Do what you originally suggested - bypass to root after offline. I feel
>     uneasy about this. Both iolatency and throtl clear their configs on
>     offline but that's punting to the parent. For iocost it'd be bypassing
>     all controls, which can actually be exploited.
> 
> 2. Make all possible IO issuers use blkcg_[un]pin_online() and shift the
>     iocost shutdown to pd_offline_fn(). This likely is the most canonical
>     solution given the current situation but it's kinda nasty to add another
>     layer of refcnting all over the place.
> 
> 3. Order blkg free so that parents are never freed before children. You did
>     this by adding refcnts in iocost but shouldn't it be possible to simply
>     shift blkg_put(blkg->parent) in __blkg_release() to blkg_free_workfn()?

As I tried to explain before, we can make sure blkg_free() is called in
order, but blkg_free() from removing a cgroup can run concurrently with
deactivating the policy, and we can't guarantee the order of
ioc_pd_free(), which is called both from blkg_free() and from
blkcg_deactivate_policy(). Hence I don't think #3 is possible.

I personally prefer #1. I don't see any real use case affected by the
defect you described, and in cgroup v1 blk-throttle is bypassed to no
limit as well.

I'm not sure about #2; it sounds like a possible solution, but I'm not
very familiar with the implementation there.

Considering that bfq already has such refcounting for bfqg, perhaps
similar refcounting is acceptable?

Thanks,
Kuai



* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-10  1:39                 ` Yu Kuai
@ 2023-01-10 18:36                   ` Tejun Heo
  2023-01-11  1:36                     ` Yu Kuai
  0 siblings, 1 reply; 22+ messages in thread
From: Tejun Heo @ 2023-01-10 18:36 UTC (permalink / raw)
  To: Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hello,

On Tue, Jan 10, 2023 at 09:39:44AM +0800, Yu Kuai wrote:
> As I tried to explain before, we can make sure blkg_free() is called
> in order, but blkg_free() from remove cgroup can concurrent with
> deactivate policy, and we can't guarantee the order of ioc_pd_free()
> that is called both from blkg_free() and blkcg_deactivate_policy().
> Hence I don't think #3 is possible.

Hahaha, sorry that I keep forgetting that. This doesn't really feel like
the important or difficult part of the problem tho. Can't it be solved by
synchronizing the blkg free work item against the deactivate path with a
mutex?

Thanks.

-- 
tejun


* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-10 18:36                   ` Tejun Heo
@ 2023-01-11  1:36                     ` Yu Kuai
  2023-01-11 17:07                       ` Tejun Heo
  0 siblings, 1 reply; 22+ messages in thread
From: Yu Kuai @ 2023-01-11  1:36 UTC (permalink / raw)
  To: Tejun Heo, Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hi,

On 2023/01/11 2:36, Tejun Heo wrote:
> Hello,
> 
> On Tue, Jan 10, 2023 at 09:39:44AM +0800, Yu Kuai wrote:
>> As I tried to explain before, we can make sure blkg_free() is called
>> in order, but blkg_free() from remove cgroup can concurrent with
>> deactivate policy, and we can't guarantee the order of ioc_pd_free()
>> that is called both from blkg_free() and blkcg_deactivate_policy().
>> Hence I don't think #3 is possible.
> 
> Hahaha, sorry that I keep forgetting that. This doesn't really feel like
> that important or difficult part of the problem tho. Can't it be solved by
> synchronizing blkg free work item against the deactivate path with a mutex?
> 

I'm not sure. Of course this can fix the problem, but blkg_destroy() is
currently protected by two spinlocks, 'blkcg->lock' and 'q->queue_lock',
and adding a mutex (disk level?) requires a refactor, which seems complex
to me.

Thanks,
Kuai



* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-11  1:36                     ` Yu Kuai
@ 2023-01-11 17:07                       ` Tejun Heo
  2023-01-12  6:18                         ` Yu Kuai
  0 siblings, 1 reply; 22+ messages in thread
From: Tejun Heo @ 2023-01-11 17:07 UTC (permalink / raw)
  To: Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hello,

On Wed, Jan 11, 2023 at 09:36:25AM +0800, Yu Kuai wrote:
> I'm not sure, of course this can fix the problem, but two spinlock
> 'blkcg->lock' and 'q->queue_lock' are used to protect blkg_destroy()
> currently, add a mutex(disk level?) requires a refactor, which seems
> complex to me.

The fact that the two paths can race each other already seems buggy. e.g.
what prevents them from running pd_free_fn() on the same pd twice? So it
needs to be fixed anyway, and the intention has always been that these
callbacks are called in the correct traversal order.

Thanks.

-- 
tejun


* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-11 17:07                       ` Tejun Heo
@ 2023-01-12  6:18                         ` Yu Kuai
  2023-01-13  0:53                           ` Tejun Heo
  0 siblings, 1 reply; 22+ messages in thread
From: Yu Kuai @ 2023-01-12  6:18 UTC (permalink / raw)
  To: Tejun Heo, Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hi,

On 2023/01/12 1:07, Tejun Heo wrote:
> Hello,
> 
> On Wed, Jan 11, 2023 at 09:36:25AM +0800, Yu Kuai wrote:
>> I'm not sure, of course this can fix the problem, but two spinlock
>> 'blkcg->lock' and 'q->queue_lock' are used to protect blkg_destroy()
>> currently, add a mutex(disk level?) requires a refactor, which seems
>> complex to me.
> 
> The fact that the two paths can race each other already seems buggy. e.g.
> What prevents them from running pd_free on the same pd twice? So, it needs

I think the root cause is that a blkg is tracked on two different lists:
blkcg->blkg_list at the cgroup level and q->blkg_list at the disk level.
And pd_free_fn() is also called from both blkg_destroy() and from
deactivating the policy for a disk.

I just thought about another solution:

Remove the blkcg_deactivate_policy() call from rq_qos_exit() when deleting
the device, and delay the policy cleanup and free to blkg_destroy_all().
Then the policies (other than bfq) only call pd_free_fn() from
blkg_destroy(), and it's easy to guarantee the order. bfq can stay the
same since it has its own refcounting.

Then, for the problem that the ioc can be freed in pd_free_fn(), we can
fix it by freeing the ioc in ioc_pd_free() for the root blkg instead of in
rq_qos_exit().
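
For the last part, roughly something like the below (untested sketch just
to show the idea; the ioc teardown would move out of ioc_rqos_exit()):

static void ioc_pd_free(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct ioc *ioc = iocg->ioc;
	bool is_root = iocg->level == 0;

	/* ... existing active_list teardown, free_percpu(), kfree(iocg) ... */

	/*
	 * With pd_free_fn() strictly ordered, the root iocg is freed
	 * last, so it can take the ioc down with it.
	 */
	if (ioc && is_root) {
		del_timer_sync(&ioc->timer);
		free_percpu(ioc->pcpu_stat);
		kfree(ioc);
	}
}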

What do you think?

Thanks,
Kuai
> to be fixed anyway and the intention always has been that these callbacks
> are called in the correct traversal order.
> 
> Thanks.
> 



* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-12  6:18                         ` Yu Kuai
@ 2023-01-13  0:53                           ` Tejun Heo
  2023-01-13  1:10                             ` Yu Kuai
  0 siblings, 1 reply; 22+ messages in thread
From: Tejun Heo @ 2023-01-13  0:53 UTC (permalink / raw)
  To: Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hello,

On Thu, Jan 12, 2023 at 02:18:15PM +0800, Yu Kuai wrote:
> remove the blkcg_deactivate_policy() from rq_qos_exit() from deleting
> the device, and delay the policy cleanup and free to blkg_destroy_all().
> Then the policies(other than bfq) can only call pd_free_fn() from
> blkg_destroy(), and it's easy to guarantee the order. For bfq, it can
> stay the same since bfq has refcounting itself.
> 
> Then for the problem that ioc can be freed in pd_free_fn(), we can fix
> it by freeing ioc in ioc_pd_free() for root blkg instead of
> rq_qos_exit().
> 
> What do you think?

That would remove the ability to dynamically remove an rq_qos policy, right?
We don't currently do it but given that having an rq_qos registered comes
with perf overhead, it's something we might want to do in the future - e.g.
only activate the policy when the controller is actually enabled. So, idk.
What's wrong with synchronizing the two removal paths? blkcg policies are
combinations of cgroups and block device configurations, so having exit
paths from both sides is kinda natural.

Thanks.

-- 
tejun


* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-13  0:53                           ` Tejun Heo
@ 2023-01-13  1:10                             ` Yu Kuai
  2023-01-13  1:15                               ` Tejun Heo
  0 siblings, 1 reply; 22+ messages in thread
From: Yu Kuai @ 2023-01-13  1:10 UTC (permalink / raw)
  To: Tejun Heo, Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hi,

On 2023/01/13 8:53, Tejun Heo wrote:
> Hello,
> 
> On Thu, Jan 12, 2023 at 02:18:15PM +0800, Yu Kuai wrote:
>> remove the blkcg_deactivate_policy() from rq_qos_exit() from deleting
>> the device, and delay the policy cleanup and free to blkg_destroy_all().
>> Then the policies(other than bfq) can only call pd_free_fn() from
>> blkg_destroy(), and it's easy to guarantee the order. For bfq, it can
>> stay the same since bfq has refcounting itself.
>>
>> Then for the problem that ioc can be freed in pd_free_fn(), we can fix
>> it by freeing ioc in ioc_pd_free() for root blkg instead of
>> rq_qos_exit().
>>
>> What do you think?
> 
> That would remove the ability to dynamically remove an rq_qos policy, right?
> We don't currently do it but given that having an rq_qos registered comes
> with perf overhead, it's something we might want to do in the future - e.g.

Yes, that makes sense: removing ioc and other policies dynamically.

> only activate the policy when the controller is actually enabled. So, idk.
> What's wrong with synchronizing the two removal paths? blkcg policies are
> combinations of cgroups and block device configurations, so having exit
> paths from both sides is kinda natural.

I still can't figure out how to synchronize them with a mutex. Maybe
I'm being foolish...

Thanks,
Kuai



* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-13  1:10                             ` Yu Kuai
@ 2023-01-13  1:15                               ` Tejun Heo
  2023-01-13  1:25                                 ` Yu Kuai
  0 siblings, 1 reply; 22+ messages in thread
From: Tejun Heo @ 2023-01-13  1:15 UTC (permalink / raw)
  To: Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hello,

On Fri, Jan 13, 2023 at 09:10:25AM +0800, Yu Kuai wrote:
> > only activate the policy when the controller is actually enabled. So, idk.
> > What's wrong with synchronizing the two removal paths? blkcg policies are
> > combinations of cgroups and block device configurations, so having exit
> > paths from both sides is kinda natural.
> 
> I still can't figure out how to synchronizing them will a mutex. Maybe
> I'm being foolish...

Hmm... can't you just use e.g. a per-bdev mutex which is grabbed by both
blkg_free_workfn() and blkcg_deactivate_policy()?
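
Something like the below, completely untested and with a made-up lock
name, just to show what I mean:

	/* hypothetical per-device lock, e.g. hanging off the request_queue */
	struct mutex		blkcg_mutex;

	/* in blkg_free_workfn() */
	mutex_lock(&blkg->q->blkcg_mutex);
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
	mutex_unlock(&blkg->q->blkcg_mutex);

with blkcg_deactivate_policy() grabbing the same mutex around its own
pd_offline_fn()/pd_free_fn() loop.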

Thanks.

-- 
tejun


* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-13  1:15                               ` Tejun Heo
@ 2023-01-13  1:25                                 ` Yu Kuai
  2023-01-13 17:16                                   ` Tejun Heo
  0 siblings, 1 reply; 22+ messages in thread
From: Yu Kuai @ 2023-01-13  1:25 UTC (permalink / raw)
  To: Tejun Heo, Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hi,

On 2023/01/13 9:15, Tejun Heo wrote:
> Hello,
> 
> On Fri, Jan 13, 2023 at 09:10:25AM +0800, Yu Kuai wrote:
>>> only activate the policy when the controller is actually enabled. So, idk.
>>> What's wrong with synchronizing the two removal paths? blkcg policies are
>>> combinations of cgroups and block device configurations, so having exit
>>> paths from both sides is kinda natural.
>>
>> I still can't figure out how to synchronizing them will a mutex. Maybe
>> I'm being foolish...
> 
> Hmm... can't you just use e.g. per-bdev mutex which is grabbed by both
> blkg_free_workfn() and blkcg_deactivate_policy()?
> 

I think holding the lock in blkg_free_workfn() is too late: pd_free_fn()
for the parent can be called first, from blkcg_deactivate_policy().

t1: remove cgroup t1/t2
blkcg_destroy_blkgs
  blkg_destroy
   percpu_ref_kill(&blkg->refcnt)
    blkg_release
     blkg_free
      schedule_work(&blkg->free_work)
      // t1 is done

t2: handle t1 from removing device
blkcg_deactivate_policy
  pd_free_fn
  // free parent
				t3: from t1
				blkg_free_workfn
				 pd_free_fn
				 // free child

Thanks,
Kuai



* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-13  1:25                                 ` Yu Kuai
@ 2023-01-13 17:16                                   ` Tejun Heo
  2023-01-16  3:25                                     ` Yu Kuai
  0 siblings, 1 reply; 22+ messages in thread
From: Tejun Heo @ 2023-01-13 17:16 UTC (permalink / raw)
  To: Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hello,

On Fri, Jan 13, 2023 at 09:25:11AM +0800, Yu Kuai wrote:
> I think hold the lock in blkg_free_workfn() is too late, pd_free_fn()
> for parent from blkcg_deactivate_policy() can be called first.
> 
> t1: remove cgroup t1/t2
> blkcg_destroy_blkgs
>  blkg_destroy
>   percpu_ref_kill(&blkg->refcnt)
>    blkg_release
>     blkg_free
>      schedule_work(&blkg->free_work)
>      // t1 is done
> 
> t2: handle t1 from removing device
> blkcg_deactivate_policy
>  pd_free_fn
>  // free parent
> 				t3: from t1
> 				blkg_free_workfn
> 				 pd_free_fn
> 				 // free child

As we discussed before, you'd have to order the actual freeing by shifting
the ref puts into the free_work. If you move `blkg_put(blkg->parent)` and
`list_del_init(&blkg->q_node)` to blkg_free_workfn() (this will require
adjustments as these things are used from other places too), the free work
items will be ordered and the blkg would remain iterable - IOW,
deactivate_policy would be able to see it allowing the two paths to
synchronize, right?
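
IOW, roughly the following on top of the earlier sketch - untested, and
whatever lock protects walking q->blkg_list would need to be held around
it (elided here):

	/* in blkg_free_workfn(), after this blkg's pd_free_fn() calls */

	/* only now make the blkg invisible to q->blkg_list walkers ... */
	list_del_init(&blkg->q_node);

	/* ... and only now allow the parent's free work to proceed */
	if (blkg->parent)
		blkg_put(blkg->parent);

blkcg_deactivate_policy() keeps walking q->blkg_list as it does today, so
it can still find any blkg whose free work hasn't finished, and the two
paths have something to synchronize on.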

Thanks.

-- 
tejun


* Re: [PATCH v2 1/2] blk-iocost: add refcounting for iocg
  2023-01-13 17:16                                   ` Tejun Heo
@ 2023-01-16  3:25                                     ` Yu Kuai
  0 siblings, 0 replies; 22+ messages in thread
From: Yu Kuai @ 2023-01-16  3:25 UTC (permalink / raw)
  To: Tejun Heo, Yu Kuai
  Cc: hch, josef, axboe, cgroups, linux-block, linux-kernel, yi.zhang,
	yukuai (C)

Hi,

On 2023/01/14 1:16, Tejun Heo wrote:
> As we discussed before, you'd have to order the actual freeing by shifting
> the ref puts into the free_work. If you move `blkg_put(blkg->parent)` and
> `list_del_init(&blkg->q_node)` to blkg_free_workfn() (this will require
> adjustments as these things are used from other places too), the free work
> items will be ordered and the blkg would remain iterable - IOW,
> deactivate_policy would be able to see it allowing the two paths to
> synchronize, right?

That sounds reasonable: only remove the blkg from the queue list once its
pd_free_fn() calls are done.

You're right that this way deactivate_policy will be able to see it; and
if deactivate_policy is called first, pd_free_fn() can be called there,
and blkg_free_workfn() should then skip pd_free_fn().
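
Roughly like the below (untested; it assumes the pd pointers are cleared
under the shared lock so each side can tell what the other has already
freed):

	/* in blkg_free_workfn(), under the lock shared with
	 * blkcg_deactivate_policy() */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		if (blkg->pd[i]) {
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
			blkg->pd[i] = NULL;	/* mark as already freed */
		}
	}

and blkcg_deactivate_policy() would do the same for its policy: call
pd_free_fn() and clear blkg->pd[pol->plid], so whichever path runs second
just skips it.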

I'm glad we finally came up with a suitable solution. 😃

BTW, it might take some time before I send a new patchset because Spring
Festival is coming.

Thanks,
Kuai
> 
> Thanks.
> 


