linux-kernel.vger.kernel.org archive mirror
* [PATCH 0/2] fixes for the updating nr_hw_queues
@ 2018-08-15  7:25 Jianchao Wang
  2018-08-15  7:25 ` [PATCH 1/2] blk-mq: init hctx sched after update cpu & nr_hw_queues mapping Jianchao Wang
  2018-08-15  7:25 ` [PATCH 2/2] blk-mq: sync the update nr_hw_queues with part_in_flight Jianchao Wang
  0 siblings, 2 replies; 10+ messages in thread
From: Jianchao Wang @ 2018-08-15  7:25 UTC
  To: axboe; +Cc: tom.leiming, bart.vanassche, keith.busch, linux-block, linux-kernel

Two fixes for updating nr_hw_queues.

The first patch fixes the following scenario:
Kyber depends on the mapping between cpus and hw queues. When
nr_hw_queues is updated, elevator_type->ops.mq.init_hctx is
invoked before the mapping has been adapted, which causes a
panic in kyber.
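As an illustration (a hypothetical sketch, not the actual kyber code;
example_sched_init_hctx() and its stats array are made up): a
scheduler's init_hctx sizes its per-hctx data from the current
mapping, so running it against a stale mapping mis-sizes the
allocation.

/*
 * Hypothetical sketch: hctx->nr_ctx counts the software queues (cpus)
 * mapped to this hw queue and is only correct once
 * blk_mq_update_queue_map() has run for the new nr_hw_queues.
 */
static int example_sched_init_hctx(struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	unsigned int *stats;

	/*
	 * If this runs before the mapping is adapted, nr_ctx still
	 * reflects the old nr_hw_queues, so the array is mis-sized and
	 * later per-ctx lookups index past the allocation.
	 */
	stats = kcalloc(hctx->nr_ctx, sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	hctx->sched_data = stats;
	return 0;
}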

The second patch fixes the following scenario:
part_in_flight/rw invokes blk_mq_in_flight/rw to account for the
in-flight requests, accessing queue_hw_ctx and nr_hw_queues
without any protection. When an update of nr_hw_queues and
blk_mq_in_flight/rw run concurrently, a panic comes up.
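A rough sketch of the race (a hypothetical simplification, not the
literal kernel code; walk_busy_tags(), reader_side() and writer_side()
are made-up names standing in for the real paths):

/* walk_busy_tags() is a hypothetical stand-in for the tag iteration */
void walk_busy_tags(struct blk_mq_hw_ctx *hctx);

/* Reader, e.g. sysfs stat read: part_in_flight() -> blk_mq_in_flight() */
static void reader_side(struct request_queue *q)
{
	unsigned int i;

	for (i = 0; i < q->nr_hw_queues; i++) {
		struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[i];

		/*
		 * If the writer below runs concurrently, hctx may
		 * already be torn down or queue_hw_ctx reallocated,
		 * and this dereference oopses.
		 */
		walk_busy_tags(hctx);
	}
}

/* Writer: __blk_mq_update_nr_hw_queues() */
static void writer_side(struct blk_mq_tag_set *set, struct request_queue *q,
			unsigned int new_nr)
{
	set->nr_hw_queues = new_nr;
	blk_mq_realloc_hw_ctxs(set, q);	/* tears down and reallocates hctxs */
}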

Jianchao Wang (2):
blk-mq: init hctx sched after update cpu & nr_hw_queues
blk-mq: sync the update nr_hw_queues with part_in_flight

 block/blk-mq.c         | 40 ++++++++++++++++++++++++++++++++--------
 block/blk.h            |  2 ++
 block/elevator.c       | 20 ++++++++++++--------
 block/genhd.c          | 10 ++++++++--
 include/linux/blkdev.h |  4 ++++
 5 files changed, 58 insertions(+), 18 deletions(-)

Thanks
Jianchao


* [PATCH 1/2] blk-mq: init hctx sched after update cpu & nr_hw_queues mapping
  2018-08-15  7:25 [PATCH 0/2] fixes for the updating nr_hw_queues Jianchao Wang
@ 2018-08-15  7:25 ` Jianchao Wang
  2018-08-15 11:32   ` Ming Lei
  2018-08-15  7:25 ` [PATCH 2/2] blk-mq: sync the update nr_hw_queues with part_in_flight Jianchao Wang
  1 sibling, 1 reply; 10+ messages in thread
From: Jianchao Wang @ 2018-08-15  7:25 UTC
  To: axboe; +Cc: tom.leiming, bart.vanassche, keith.busch, linux-block, linux-kernel

Kyber depends on the mapping between cpus and hw queues. When
nr_hw_queues is updated, elevator_type->ops.mq.init_hctx is
invoked before the mapping has been adapted, which would produce
a broken result. A simple way to fix this is to switch the io
scheduler to none before updating nr_hw_queues, and then switch
it back afterwards. To achieve this, we add a new member elv_type
in request_queue to save the original elevator type, and adapt
and export elevator_switch_mq.

Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
---
 block/blk-mq.c         | 37 +++++++++++++++++++++++++++++--------
 block/blk.h            |  2 ++
 block/elevator.c       | 20 ++++++++++++--------
 include/linux/blkdev.h |  3 +++
 4 files changed, 46 insertions(+), 16 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5efd789..89904cc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -112,6 +112,7 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
 	struct mq_inflight mi = { .part = part, .inflight = inflight, };
 
 	inflight[0] = inflight[1] = 0;
+
 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
 }
 
@@ -2147,8 +2148,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 	if (set->ops->exit_request)
 		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
 
-	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
-
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
 
@@ -2216,12 +2215,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
 		goto free_bitmap;
 
-	if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
-		goto exit_hctx;
-
 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
 	if (!hctx->fq)
-		goto sched_exit_hctx;
+		goto exit_hctx;
 
 	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
 		goto free_fq;
@@ -2235,8 +2231,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
 
  free_fq:
 	kfree(hctx->fq);
- sched_exit_hctx:
-	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
  exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
@@ -2913,6 +2907,25 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_freeze_queue(q);
 
+	/*
+	 * Switch the io scheduler to none to clean up its data. Switch
+	 * it back after the cpu to hw queue mapping is updated.
+	 */
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		if (!q->elevator) {
+			q->elv_type = NULL;
+			continue;
+		}
+		q->elv_type = q->elevator->type;
+		mutex_lock(&q->sysfs_lock);
+		/*
+		 * elevator_release will put it.
+		 */
+		__module_get(q->elv_type->elevator_owner);
+		elevator_switch_mq(q, NULL);
+		mutex_unlock(&q->sysfs_lock);
+	}
+
 	set->nr_hw_queues = nr_hw_queues;
 	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
@@ -2920,6 +2933,14 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		blk_mq_queue_reinit(q);
 	}
 
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		if (!q->elv_type)
+			continue;
+
+		mutex_lock(&q->sysfs_lock);
+		elevator_switch_mq(q, q->elv_type);
+		mutex_unlock(&q->sysfs_lock);
+	}
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
 }
diff --git a/block/blk.h b/block/blk.h
index d4d67e9..0c9bc8d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -234,6 +234,8 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq
 
 int elevator_init(struct request_queue *);
 int elevator_init_mq(struct request_queue *q);
+int elevator_switch_mq(struct request_queue *q,
+			      struct elevator_type *new_e);
 void elevator_exit(struct request_queue *, struct elevator_queue *);
 int elv_register_queue(struct request_queue *q);
 void elv_unregister_queue(struct request_queue *q);
diff --git a/block/elevator.c b/block/elevator.c
index fa828b5..5ea6e7d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -933,16 +933,13 @@ void elv_unregister(struct elevator_type *e)
 }
 EXPORT_SYMBOL_GPL(elv_unregister);
 
-static int elevator_switch_mq(struct request_queue *q,
+int elevator_switch_mq(struct request_queue *q,
 			      struct elevator_type *new_e)
 {
 	int ret;
 
 	lockdep_assert_held(&q->sysfs_lock);
 
-	blk_mq_freeze_queue(q);
-	blk_mq_quiesce_queue(q);
-
 	if (q->elevator) {
 		if (q->elevator->registered)
 			elv_unregister_queue(q);
@@ -968,8 +965,6 @@ static int elevator_switch_mq(struct request_queue *q,
 		blk_add_trace_msg(q, "elv switch: none");
 
 out:
-	blk_mq_unquiesce_queue(q);
-	blk_mq_unfreeze_queue(q);
 	return ret;
 }
 
@@ -1021,8 +1016,17 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
 	lockdep_assert_held(&q->sysfs_lock);
 
-	if (q->mq_ops)
-		return elevator_switch_mq(q, new_e);
+	if (q->mq_ops) {
+		blk_mq_freeze_queue(q);
+		blk_mq_quiesce_queue(q);
+
+		err = elevator_switch_mq(q, new_e);
+
+		blk_mq_unquiesce_queue(q);
+		blk_mq_unfreeze_queue(q);
+
+		return err;
+	}
 
 	/*
 	 * Turn on BYPASS and drain all requests w/ elevator private data.
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d6869e0..ee930c4 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -437,6 +437,9 @@ struct request_queue {
 	struct list_head	queue_head;
 	struct request		*last_merge;
 	struct elevator_queue	*elevator;
+
+	/* used when updating nr_hw_queues */
+	struct elevator_type	*elv_type;
 	int			nr_rqs[2];	/* # allocated [a]sync rqs */
 	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */
 
-- 
2.7.4



* [PATCH 2/2] blk-mq: sync the update nr_hw_queues with part_in_flight
  2018-08-15  7:25 [PATCH 0/2] fixes for the updating nr_hw_queues Jianchao Wang
  2018-08-15  7:25 ` [PATCH 1/2] blk-mq: init hctx sched after update cpu & nr_hw_queues mapping Jianchao Wang
@ 2018-08-15  7:25 ` Jianchao Wang
  2018-08-16  4:50   ` Ming Lei
  1 sibling, 1 reply; 10+ messages in thread
From: Jianchao Wang @ 2018-08-15  7:25 UTC
  To: axboe; +Cc: tom.leiming, bart.vanassche, keith.busch, linux-block, linux-kernel

For blk-mq, part_in_flight/rw invokes blk_mq_in_flight/rw to
account for the in-flight requests, accessing queue_hw_ctx and
nr_hw_queues without any protection. When an update of nr_hw_queues
and blk_mq_in_flight/rw run concurrently, a panic comes up.
To fix it, introduce a flag mq_realloc_hw_ctxs to mark an
in-progress nr_hw_queues update, and use RCU to ensure a change
of mq_realloc_hw_ctxs is globally visible.

Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
---
 block/blk-mq.c         |  3 +++
 block/genhd.c          | 10 ++++++++--
 include/linux/blkdev.h |  1 +
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 89904cc..ff50afc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2517,6 +2517,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
 	blk_mq_sysfs_unregister(q);
 
+	WRITE_ONCE(q->mq_realloc_hw_ctxs, true);
+	synchronize_rcu();
 	/* protect against switching io scheduler  */
 	mutex_lock(&q->sysfs_lock);
 	for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2564,6 +2566,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	}
 	q->nr_hw_queues = i;
 	mutex_unlock(&q->sysfs_lock);
+	WRITE_ONCE(q->mq_realloc_hw_ctxs, false);
 	blk_mq_sysfs_register(q);
 }
 
diff --git a/block/genhd.c b/block/genhd.c
index 8cc719a3..f9e46aa 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -69,7 +69,10 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part,
 		    unsigned int inflight[2])
 {
 	if (q->mq_ops) {
-		blk_mq_in_flight(q, part, inflight);
+		rcu_read_lock();
+		if (!READ_ONCE(q->mq_realloc_hw_ctxs))
+			blk_mq_in_flight(q, part, inflight);
+		rcu_read_unlock();
 		return;
 	}
 
@@ -86,7 +89,10 @@ void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
 		       unsigned int inflight[2])
 {
 	if (q->mq_ops) {
-		blk_mq_in_flight_rw(q, part, inflight);
+		rcu_read_lock();
+		if (!READ_ONCE(q->mq_realloc_hw_ctxs))
+			blk_mq_in_flight_rw(q, part, inflight);
+		rcu_read_unlock();
 		return;
 	}
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ee930c4..5cb6662 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -662,6 +662,7 @@ struct request_queue {
 #endif
 
 	bool			mq_sysfs_init_done;
+	bool			mq_realloc_hw_ctxs;
 
 	size_t			cmd_size;
 	void			*rq_alloc_data;
-- 
2.7.4



* Re: [PATCH 1/2] blk-mq: init hctx sched after update cpu & nr_hw_queues mapping
  2018-08-15  7:25 ` [PATCH 1/2] blk-mq: init hctx sched after update cpu & nr_hw_queues mapping Jianchao Wang
@ 2018-08-15 11:32   ` Ming Lei
  2018-08-16  9:52     ` jianchao.wang
  0 siblings, 1 reply; 10+ messages in thread
From: Ming Lei @ 2018-08-15 11:32 UTC
  To: Jianchao Wang
  Cc: Jens Axboe, Bart Van Assche, Keith Busch, linux-block,
	Linux Kernel Mailing List

On Wed, Aug 15, 2018 at 3:25 PM, Jianchao Wang
<jianchao.w.wang@oracle.com> wrote:
> Kyber depends on the mapping between cpus and hw queues. When
> nr_hw_queues is updated, elevator_type->ops.mq.init_hctx is
> invoked before the mapping has been adapted, which would produce
> a broken result. A simple way to fix this is to switch the io
> scheduler to none before updating nr_hw_queues, and then switch
> it back afterwards. To achieve this, we add a new member elv_type
> in request_queue to save the original elevator type, and adapt
> and export elevator_switch_mq.
>
> Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
> ---
>  block/blk-mq.c         | 37 +++++++++++++++++++++++++++++--------
>  block/blk.h            |  2 ++
>  block/elevator.c       | 20 ++++++++++++--------
>  include/linux/blkdev.h |  3 +++
>  4 files changed, 46 insertions(+), 16 deletions(-)
>
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 5efd789..89904cc 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -112,6 +112,7 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
>         struct mq_inflight mi = { .part = part, .inflight = inflight, };
>
>         inflight[0] = inflight[1] = 0;
> +

Not necessary to do that.

>         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
>  }
>
> @@ -2147,8 +2148,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
>         if (set->ops->exit_request)
>                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
>
> -       blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
> -
>         if (set->ops->exit_hctx)
>                 set->ops->exit_hctx(hctx, hctx_idx);
>
> @@ -2216,12 +2215,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
>             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
>                 goto free_bitmap;
>
> -       if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
> -               goto exit_hctx;
> -
>         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
>         if (!hctx->fq)
> -               goto sched_exit_hctx;
> +               goto exit_hctx;
>
>         if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
>                 goto free_fq;
> @@ -2235,8 +2231,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
>
>   free_fq:
>         kfree(hctx->fq);
> - sched_exit_hctx:
> -       blk_mq_sched_exit_hctx(q, hctx, hctx_idx);

Seems both blk_mq_sched_init_hctx() and blk_mq_sched_exit_hctx() may be
removed now.

>   exit_hctx:
>         if (set->ops->exit_hctx)
>                 set->ops->exit_hctx(hctx, hctx_idx);
> @@ -2913,6 +2907,25 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
>         list_for_each_entry(q, &set->tag_list, tag_set_list)
>                 blk_mq_freeze_queue(q);
>
> +       /*
> +        * Switch the io scheduler to none to clean up its data. Switch
> +        * it back after the cpu to hw queue mapping is updated.
> +        */
> +       list_for_each_entry(q, &set->tag_list, tag_set_list) {
> +               if (!q->elevator) {
> +                       q->elv_type = NULL;
> +                       continue;
> +               }
> +               q->elv_type = q->elevator->type;
> +               mutex_lock(&q->sysfs_lock);
> +               /*
> +                * elevator_release will put it.
> +                */
> +               __module_get(q->elv_type->elevator_owner);
> +               elevator_switch_mq(q, NULL);
> +               mutex_unlock(&q->sysfs_lock);
> +       }
> +
>         set->nr_hw_queues = nr_hw_queues;
>         blk_mq_update_queue_map(set);
>         list_for_each_entry(q, &set->tag_list, tag_set_list) {
> @@ -2920,6 +2933,14 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
>                 blk_mq_queue_reinit(q);
>         }
>
> +       list_for_each_entry(q, &set->tag_list, tag_set_list) {
> +               if (!q->elv_type)
> +                       continue;
> +
> +               mutex_lock(&q->sysfs_lock);
> +               elevator_switch_mq(q, q->elv_type);
> +               mutex_unlock(&q->sysfs_lock);
> +       }

BFQ defines .init_hctx() too, so it seems this generic approach is the
correct way to address this issue.

thanks,
Ming Lei


* Re: [PATCH 2/2] blk-mq: sync the update nr_hw_queues with part_in_flight
  2018-08-15  7:25 ` [PATCH 2/2] blk-mq: sync the update nr_hw_queues with part_in_flight Jianchao Wang
@ 2018-08-16  4:50   ` Ming Lei
  2018-08-16  8:29     ` jianchao.wang
  0 siblings, 1 reply; 10+ messages in thread
From: Ming Lei @ 2018-08-16  4:50 UTC
  To: Jianchao Wang
  Cc: Jens Axboe, Bart Van Assche, Keith Busch, linux-block,
	Linux Kernel Mailing List

On Wed, Aug 15, 2018 at 3:25 PM, Jianchao Wang
<jianchao.w.wang@oracle.com> wrote:
> For blk-mq, part_in_flight/rw invokes blk_mq_in_flight/rw to
> account for the in-flight requests, accessing queue_hw_ctx and
> nr_hw_queues without any protection. When an update of nr_hw_queues
> and blk_mq_in_flight/rw run concurrently, a panic comes up.

When updating nr_hw_queues, all queues are frozen, and there shouldn't
be any inflight requests, so there shouldn't be such an issue.


Thanks,
Ming Lei


* Re: [PATCH 2/2] blk-mq: sync the update nr_hw_queues with part_in_flight
  2018-08-16  4:50   ` Ming Lei
@ 2018-08-16  8:29     ` jianchao.wang
  2018-08-16  9:03       ` Ming Lei
  0 siblings, 1 reply; 10+ messages in thread
From: jianchao.wang @ 2018-08-16  8:29 UTC
  To: Ming Lei
  Cc: Jens Axboe, Bart Van Assche, Keith Busch, linux-block,
	Linux Kernel Mailing List



On 08/16/2018 12:50 PM, Ming Lei wrote:
> On Wed, Aug 15, 2018 at 3:25 PM, Jianchao Wang
> <jianchao.w.wang@oracle.com> wrote:
>> For blk-mq, part_in_flight/rw invokes blk_mq_in_flight/rw to
>> account for the in-flight requests, accessing queue_hw_ctx and
>> nr_hw_queues without any protection. When an update of nr_hw_queues
>> and blk_mq_in_flight/rw run concurrently, a panic comes up.
> 
> When updating nr_hw_queues, all queues are frozen, and there shouldn't
> be any inflight requests, so there shouldn't be such an issue.
> 

I get the following crash when running the update nr_hw_queues test.

[  112.643189] BUG: unable to handle kernel NULL pointer dereference at 0000000000000174
[  112.643275] PGD 40baf8067 P4D 40baf8067 PUD 40bb38067 PMD 0 
[  112.643334] Oops: 0000 [#1] PREEMPT SMP
[  112.643372] CPU: 7 PID: 1526 Comm: fio Kdump: loaded Not tainted 4.18.0-rc6+ #250
[  112.643434] Hardware name: LENOVO 10MLS0E339/3106, BIOS M1AKT22A 06/27/2017
[  112.643499] RIP: 0010:blk_mq_queue_tag_busy_iter+0x4d/0x250
[  112.643548] Code: 48 89 54 24 20 c7 44 24 0c 00 00 00 00 85 c9 0f 84 25 01 00 00 48 8b 7c 24 10 48 63 44 24 0c 48 8b 97 88 01 00 00 4c 8b 34 c2 <41> 8b 96 74 01 00 00 4d 8b a6 e8 01 00 00 85 d2 0f 84 e0 00 00 00 
[  112.643791] RSP: 0018:ffff95708284fc70 EFLAGS: 00010202
[  112.643840] RAX: 0000000000000002 RBX: ffff895b49570e18 RCX: 00000000000000ff
[  112.643899] RDX: ffff895b4fc1d6c0 RSI: ffffffff8b6a94b0 RDI: ffff895b49570e18
[  112.643961] RBP: 000000000000001f R08: 0000000000000000 R09: 0000000000000000
[  112.644021] R10: ffff95708284fcd8 R11: ffffffff8b6b547d R12: ffff895b4fc30b40
[  112.644081] R13: 0000000000000000 R14: 0000000000000000 R15: ffff895b48c96c40
[  112.644144] FS:  00007fa79fd4c700(0000) GS:ffff895b62dc0000(0000) knlGS:0000000000000000
[  112.644212] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  112.644270] CR2: 0000000000000174 CR3: 000000040b9ae001 CR4: 00000000003606e0
[  112.644357] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[  112.644459] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[  112.644552] Call Trace:
[  112.644603]  ? blk_mq_stop_hw_queues+0x50/0x50
[  112.644675]  ? blk_mq_stop_hw_queues+0x50/0x50
[  112.644748]  blk_mq_in_flight+0x2e/0x40
[  112.644792]  part_round_stats+0x158/0x160
[  112.644835]  part_stat_show+0x9c/0x530
[  112.644873]  ? lock_acquire+0xab/0x200
[  112.644913]  ? kernfs_seq_start+0x32/0x90
[  112.644959]  dev_attr_show+0x19/0x50
[  112.644996]  sysfs_kf_seq_show+0xad/0x100
[  112.645039]  seq_read+0xa5/0x410
[  112.645075]  ? __mutex_lock+0x20e/0x990
[  112.645117]  __vfs_read+0x23/0x160
[  112.645158]  vfs_read+0xa0/0x140
[  112.645193]  ksys_read+0x45/0xa0
[  112.645230]  do_syscall_64+0x5a/0x1a0
[  112.645267]  entry_SYSCALL_64_after_hwframe+0x49/0xbe


blk_mq_in_flight accesses queue_hw_ctx and nr_hw_queues while the nr_hw_queues update is ongoing.
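
Roughly, as a simplified sketch (hypothetical code, not the literal
kernel paths; submit_path() and stat_path() are made-up names): the
freeze only waits for users who enter the queue through
q_usage_counter, while the stat path never takes that reference.

/* I/O submission enters the queue via the percpu ref ... */
static void submit_path(struct request_queue *q)
{
	if (blk_queue_enter(q, 0))	/* blocks, or fails, while frozen */
		return;
	/* ... dispatch the request ... */
	blk_queue_exit(q);
}

/* ... but the stat reader never takes that reference. */
static void stat_path(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2])
{
	/* walks q->queue_hw_ctx regardless of the freeze state */
	blk_mq_in_flight(q, part, inflight);
}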

Thanks
Jianchao

> 
> Thanks,
> Ming Lei
> 


* Re: [PATCH 2/2] blk-mq: sync the update nr_hw_queues with part_in_flight
  2018-08-16  8:29     ` jianchao.wang
@ 2018-08-16  9:03       ` Ming Lei
  2018-08-16  9:20         ` jianchao.wang
  0 siblings, 1 reply; 10+ messages in thread
From: Ming Lei @ 2018-08-16  9:03 UTC
  To: jianchao.wang
  Cc: Ming Lei, Jens Axboe, Bart Van Assche, Keith Busch, linux-block,
	Linux Kernel Mailing List

On Thu, Aug 16, 2018 at 04:29:33PM +0800, jianchao.wang wrote:
> 
> 
> On 08/16/2018 12:50 PM, Ming Lei wrote:
> > On Wed, Aug 15, 2018 at 3:25 PM, Jianchao Wang
> > <jianchao.w.wang@oracle.com> wrote:
> >> For blk-mq, part_in_flight/rw invokes blk_mq_in_flight/rw to
> >> account for the in-flight requests, accessing queue_hw_ctx and
> >> nr_hw_queues without any protection. When an update of nr_hw_queues
> >> and blk_mq_in_flight/rw run concurrently, a panic comes up.
> > 
> > When updating nr_hw_queues, all queues are frozen, and there shouldn't
> > be any inflight requests, so there shouldn't be such an issue.
> > 
> 
> I get the following crash when running the update nr_hw_queues test.
> 
> [  112.643189] BUG: unable to handle kernel NULL pointer dereference at 0000000000000174
> [  112.643275] PGD 40baf8067 P4D 40baf8067 PUD 40bb38067 PMD 0 
> [  112.643334] Oops: 0000 [#1] PREEMPT SMP
> [  112.643372] CPU: 7 PID: 1526 Comm: fio Kdump: loaded Not tainted 4.18.0-rc6+ #250
> [  112.643434] Hardware name: LENOVO 10MLS0E339/3106, BIOS M1AKT22A 06/27/2017
> [  112.643499] RIP: 0010:blk_mq_queue_tag_busy_iter+0x4d/0x250
> [  112.643548] Code: 48 89 54 24 20 c7 44 24 0c 00 00 00 00 85 c9 0f 84 25 01 00 00 48 8b 7c 24 10 48 63 44 24 0c 48 8b 97 88 01 00 00 4c 8b 34 c2 <41> 8b 96 74 01 00 00 4d 8b a6 e8 01 00 00 85 d2 0f 84 e0 00 00 00 
> [  112.643791] RSP: 0018:ffff95708284fc70 EFLAGS: 00010202
> [  112.643840] RAX: 0000000000000002 RBX: ffff895b49570e18 RCX: 00000000000000ff
> [  112.643899] RDX: ffff895b4fc1d6c0 RSI: ffffffff8b6a94b0 RDI: ffff895b49570e18
> [  112.643961] RBP: 000000000000001f R08: 0000000000000000 R09: 0000000000000000
> [  112.644021] R10: ffff95708284fcd8 R11: ffffffff8b6b547d R12: ffff895b4fc30b40
> [  112.644081] R13: 0000000000000000 R14: 0000000000000000 R15: ffff895b48c96c40
> [  112.644144] FS:  00007fa79fd4c700(0000) GS:ffff895b62dc0000(0000) knlGS:0000000000000000
> [  112.644212] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [  112.644270] CR2: 0000000000000174 CR3: 000000040b9ae001 CR4: 00000000003606e0
> [  112.644357] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
> [  112.644459] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
> [  112.644552] Call Trace:
> [  112.644603]  ? blk_mq_stop_hw_queues+0x50/0x50
> [  112.644675]  ? blk_mq_stop_hw_queues+0x50/0x50
> [  112.644748]  blk_mq_in_flight+0x2e/0x40
> [  112.644792]  part_round_stats+0x158/0x160
> [  112.644835]  part_stat_show+0x9c/0x530
> [  112.644873]  ? lock_acquire+0xab/0x200
> [  112.644913]  ? kernfs_seq_start+0x32/0x90
> [  112.644959]  dev_attr_show+0x19/0x50
> [  112.644996]  sysfs_kf_seq_show+0xad/0x100
> [  112.645039]  seq_read+0xa5/0x410
> [  112.645075]  ? __mutex_lock+0x20e/0x990
> [  112.645117]  __vfs_read+0x23/0x160
> [  112.645158]  vfs_read+0xa0/0x140
> [  112.645193]  ksys_read+0x45/0xa0
> [  112.645230]  do_syscall_64+0x5a/0x1a0
> [  112.645267]  entry_SYSCALL_64_after_hwframe+0x49/0xbe
> 
> 
> blk_mq_in_flight accesses queue_hw_ctx and nr_hw_queues while the nr_hw_queues update is ongoing.
> 

Sorry for missing this use case. What do you think of the following fix?

diff --git a/block/blk-mq.c b/block/blk-mq.c
index b42a2c9ba00e..fbc5534f8178 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -113,6 +113,10 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
 	struct mq_inflight mi = { .part = part, .inflight = inflight, };
 
 	inflight[0] = inflight[1] = 0;
+
+	if (percpu_ref_is_dying(&q->q_usage_counter))
+		return;
+
 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
 }
 

Thanks,
Ming


* Re: [PATCH 2/2] blk-mq: sync the update nr_hw_queues with part_in_flight
  2018-08-16  9:03       ` Ming Lei
@ 2018-08-16  9:20         ` jianchao.wang
  2018-08-16  9:38           ` Ming Lei
  0 siblings, 1 reply; 10+ messages in thread
From: jianchao.wang @ 2018-08-16  9:20 UTC
  To: Ming Lei
  Cc: Ming Lei, Jens Axboe, Bart Van Assche, Keith Busch, linux-block,
	Linux Kernel Mailing List

Hi Ming

On 08/16/2018 05:03 PM, Ming Lei wrote:
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index b42a2c9ba00e..fbc5534f8178 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -113,6 +113,10 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
>  	struct mq_inflight mi = { .part = part, .inflight = inflight, };
>  
>  	inflight[0] = inflight[1] = 0;
> +
> +	if (percpu_ref_is_dying(&q->q_usage_counter))
> +		return;
> +
>  	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
>  }

Using q->q_usage_counter is a good idea.
But I think we could make the following modifications:
1. use percpu_ref_is_zero, so we will not miss any in-flight requests here.
2. use RCU to ensure that any user of blk_mq_in_flight has left the critical section.

Like the following patch:
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 89904cc..cd9878e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -113,7 +113,14 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
 
        inflight[0] = inflight[1] = 0;
 
+       rcu_read_lock();
+       if (percpu_ref_is_zero(&q->q_usage_counter)) {
+               rcu_read_unlock();
+               return;
+       }
+
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
+       rcu_read_unlock();
 }
 
 static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
@@ -2907,6 +2912,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_freeze_queue(q);
 
+       synchronize_rcu();
        /*
         * Switch the io scheduler to none to clean up its data. Switch
         * it back after the cpu to hw queue mapping is updated.

Also, some comments are needed to describe them. ;)

Thanks
Jianchao


* Re: [PATCH 2/2] blk-mq: sync the update nr_hw_queues with part_in_flight
  2018-08-16  9:20         ` jianchao.wang
@ 2018-08-16  9:38           ` Ming Lei
  0 siblings, 0 replies; 10+ messages in thread
From: Ming Lei @ 2018-08-16  9:38 UTC
  To: jianchao.wang
  Cc: Ming Lei, Jens Axboe, Bart Van Assche, Keith Busch, linux-block,
	Linux Kernel Mailing List

On Thu, Aug 16, 2018 at 05:20:50PM +0800, jianchao.wang wrote:
> Hi Ming
> 
> On 08/16/2018 05:03 PM, Ming Lei wrote:
> > diff --git a/block/blk-mq.c b/block/blk-mq.c
> > index b42a2c9ba00e..fbc5534f8178 100644
> > --- a/block/blk-mq.c
> > +++ b/block/blk-mq.c
> > @@ -113,6 +113,10 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
> >  	struct mq_inflight mi = { .part = part, .inflight = inflight, };
> >  
> >  	inflight[0] = inflight[1] = 0;
> > +
> > +	if (percpu_ref_is_dying(&q->q_usage_counter))
> > +		return;
> > +
> >  	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
> >  }
> 
> Using q->q_usage_counter is a good idea.
> But I think we could make the following modifications:
> 1. use percpu_ref_is_zero, so we will not miss any in-flight requests here.
> 2. use RCU to ensure that any user of blk_mq_in_flight has left the critical section.
> Like the following patch:
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 89904cc..cd9878e 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -113,7 +113,14 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
>  
>         inflight[0] = inflight[1] = 0;
>  
> +       rcu_read_lock();
> +       if (percpu_ref_is_zero(&q->q_usage_counter)) {
> +               rcu_read_unlock();
> +               return;
> +       }
> +
>         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
> +       rcu_read_unlock();
>  }
>  
>  static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
> @@ -2907,6 +2912,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
>         list_for_each_entry(q, &set->tag_list, tag_set_list)
>                 blk_mq_freeze_queue(q);
>  
> +       synchronize_rcu();
>         /*
>          * switch io scheduler to NULL to clean up the data in it.
>          * will get it back after update mapping between cpu and hw queues.
> 
> Also, some comments are needed to describe them. ;)

This patch looks fine to me.

Thanks
Ming


* Re: [PATCH 1/2] blk-mq: init hctx sched after update cpu & nr_hw_queues mapping
  2018-08-15 11:32   ` Ming Lei
@ 2018-08-16  9:52     ` jianchao.wang
  0 siblings, 0 replies; 10+ messages in thread
From: jianchao.wang @ 2018-08-16  9:52 UTC
  To: Ming Lei
  Cc: Jens Axboe, Bart Van Assche, Keith Busch, linux-block,
	Linux Kernel Mailing List


On 08/15/2018 07:32 PM, Ming Lei wrote:
> On Wed, Aug 15, 2018 at 3:25 PM, Jianchao Wang
> <jianchao.w.wang@oracle.com> wrote:
>> Kyber depends on the mapping between cpus and hw queues. When
>> nr_hw_queues is updated, elevator_type->ops.mq.init_hctx is
>> invoked before the mapping has been adapted, which would produce
>> a broken result. A simple way to fix this is to switch the io
>> scheduler to none before updating nr_hw_queues, and then switch
>> it back afterwards. To achieve this, we add a new member elv_type
>> in request_queue to save the original elevator type, and adapt
>> and export elevator_switch_mq.
>>
>> Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
>> ---
>>  block/blk-mq.c         | 37 +++++++++++++++++++++++++++++--------
>>  block/blk.h            |  2 ++
>>  block/elevator.c       | 20 ++++++++++++--------
>>  include/linux/blkdev.h |  3 +++
>>  4 files changed, 46 insertions(+), 16 deletions(-)
>>
>> diff --git a/block/blk-mq.c b/block/blk-mq.c
>> index 5efd789..89904cc 100644
>> --- a/block/blk-mq.c
>> +++ b/block/blk-mq.c
>> @@ -112,6 +112,7 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
>>         struct mq_inflight mi = { .part = part, .inflight = inflight, };
>>
>>         inflight[0] = inflight[1] = 0;
>> +
> 
> Not necessary to do that.

Yes, I will discard this.

> 
>>         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
>>  }
>>
>> @@ -2147,8 +2148,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
>>         if (set->ops->exit_request)
>>                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
>>
>> -       blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
>> -
>>         if (set->ops->exit_hctx)
>>                 set->ops->exit_hctx(hctx, hctx_idx);
>>
>> @@ -2216,12 +2215,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
>>             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
>>                 goto free_bitmap;
>>
>> -       if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
>> -               goto exit_hctx;
>> -
>>         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
>>         if (!hctx->fq)
>> -               goto sched_exit_hctx;
>> +               goto exit_hctx;
>>
>>         if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
>>                 goto free_fq;
>> @@ -2235,8 +2231,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
>>
>>   free_fq:
>>         kfree(hctx->fq);
>> - sched_exit_hctx:
>> -       blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
> 
> Seems both blk_mq_sched_init_hctx() and blk_mq_sched_exit_hctx() may be
> removed now.

Yes, I will remove them in V2.

Thanks
Jianchao


