From: QiuLaibin <qiulaibin@huawei.com>
To: <axboe@kernel.dk>, <linux-kernel@vger.kernel.org>,
	<linux-block@vger.kernel.org>
Cc: <martin.petersen@oracle.com>, <ming.lei@redhat.com>,
	<hare@suse.de>, <asml.silence@gmail.com>, <bvanassche@acm.org>
Subject: [PATCH -next] blk-mq: fix tag_get wait task can't be awakened
Date: Thu, 16 Sep 2021 22:13:54 +0800	[thread overview]
Message-ID: <8812a7f9-462c-a417-fc17-eb359b22f2a9@huawei.com> (raw)
In-Reply-To: <16d831ec8e624fb5acb7ad8f2dc0b7bf@huawei.com>

ping...

On 2021/9/16 22:10, qiulaibin wrote:
> When multiple hctx share one tagset, wake_batch is calculated from the whole queue_depth at initialization time. The queue depth assigned to each active user may then be smaller than wake_batch. This can cause the wait queues to never be woken up, which leads to a hang.
>
> Fix this by recalculating wake_batch whenever active_queues is incremented or decremented.
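>
> To make the failure mode concrete, here is a minimal user-space sketch of the arithmetic (not part of the patch): the SBQ_WAIT_QUEUES/SBQ_WAKE_BATCH values and the simplified batch formula are assumptions, and the real sbq_calc_wake_batch() additionally accounts for min_shallow_depth.
>
>   #include <stdio.h>
>
>   #define SBQ_WAIT_QUEUES 8	/* assumed value */
>   #define SBQ_WAKE_BATCH  8	/* assumed value */
>
>   /* Simplified model of sbq_calc_wake_batch(): batch scales with depth. */
>   static unsigned int calc_wake_batch(unsigned int depth)
>   {
>   	unsigned int batch = depth / SBQ_WAIT_QUEUES;
>
>   	if (batch < 1)
>   		batch = 1;
>   	if (batch > SBQ_WAKE_BATCH)
>   		batch = SBQ_WAKE_BATCH;
>   	return batch;
>   }
>
>   int main(void)
>   {
>   	unsigned int depth = 256;	/* example tagset depth */
>   	unsigned int users = 64;	/* example number of active queues */
>
>   	/* Per-user share of the tag space, as in bt_update_wake_batch(). */
>   	unsigned int per_user = (depth + users - 1) / users;
>
>   	if (per_user < 4)
>   		per_user = 4;
>
>   	printf("initial wake_batch      : %u\n", calc_wake_batch(depth));    /* 8 */
>   	printf("per-user depth          : %u\n", per_user);                  /* 4 */
>   	printf("recalculated wake_batch : %u\n", calc_wake_batch(per_user)); /* 1 */
>   	return 0;
>   }
>
> With 64 active queues each user effectively gets 4 tags while the initial wake_batch stays at 8, so a waiter's wait queue may never collect enough tag releases to be woken; recalculating against the per-user depth drops the batch to 1.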
>
> Fixes: 0d2602ca30e41 ("blk-mq: improve support for shared tags maps")
> Signed-off-by: Laibin Qiu <qiulaibin@huawei.com>
> ---
>   block/blk-mq-tag.c      | 44 +++++++++++++++++++++++++++++++++++++++--
>   include/linux/sbitmap.h |  8 ++++++++
>   lib/sbitmap.c           |  3 ++-
>   3 files changed, 52 insertions(+), 3 deletions(-)
>
> diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
> index 86f87346232a..d02f5ac0004c 100644
> --- a/block/blk-mq-tag.c
> +++ b/block/blk-mq-tag.c
> @@ -16,6 +16,27 @@
>   #include "blk-mq-sched.h"
>   #include "blk-mq-tag.h"
>   
> +static void bt_update_wake_batch(struct sbitmap_queue *bt, unsigned int users)
> +{
> +	unsigned int depth;
> +
> +	depth = max((bt->sb.depth + users - 1) / users, 4U);
> +	sbitmap_queue_update_wake_batch(bt, depth);
> +}
> +
> +/*
> + * Recalculate wakeup batch when tag is shared by hctx.
> + */
> +static void blk_mq_update_wake_batch(struct sbitmap_queue *bitmap_tags,
> +		struct sbitmap_queue *breserved_tags, unsigned int users)
> +{
> +	if (!users)
> +		return;
> +
> +	bt_update_wake_batch(bitmap_tags, users);
> +	bt_update_wake_batch(breserved_tags, users);
> +}
> +
>   /*
>    * If a previously inactive queue goes active, bump the active user count.
>    * We need to do this before try to allocate driver tag, then even if fail
> @@ -24,17 +45,29 @@
>    */
>   bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
>   {
> +	unsigned int users;
> +
>   	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
>   		struct request_queue *q = hctx->queue;
>   		struct blk_mq_tag_set *set = q->tag_set;
>   
>   		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
> -		    !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
> +		    !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) {
>   			atomic_inc(&set->active_queues_shared_sbitmap);
> +
> +			users = atomic_read(&set->active_queues_shared_sbitmap);
> +			blk_mq_update_wake_batch(&set->__bitmap_tags,
> +					&set->__breserved_tags, users);
> +		}
>   	} else {
>   		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
> -		    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
> +		    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) {
>   			atomic_inc(&hctx->tags->active_queues);
> +
> +			users = atomic_read(&hctx->tags->active_queues);
> +			blk_mq_update_wake_batch(&hctx->tags->__bitmap_tags,
> +					&hctx->tags->__breserved_tags, users);
> +		}
>   	}
>   
>   	return true;
> @@ -59,16 +92,23 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
>   	struct blk_mq_tags *tags = hctx->tags;
>   	struct request_queue *q = hctx->queue;
>   	struct blk_mq_tag_set *set = q->tag_set;
> +	unsigned int users;
>   
>   	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
>   		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
>   					&q->queue_flags))
>   			return;
>   		atomic_dec(&set->active_queues_shared_sbitmap);
> +		users = atomic_read(&set->active_queues_shared_sbitmap);
> +		blk_mq_update_wake_batch(&set->__bitmap_tags,
> +				&set->__breserved_tags, users);
>   	} else {
>   		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
>   			return;
>   		atomic_dec(&tags->active_queues);
> +		users = atomic_read(&tags->active_queues);
> +		blk_mq_update_wake_batch(&tags->__bitmap_tags,
> +				&tags->__breserved_tags, users);
>   	}
>   
>   	blk_mq_tag_wakeup_all(tags, false);
> diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
> index 2713e689ad66..d49e4f054bfe 100644
> --- a/include/linux/sbitmap.h
> +++ b/include/linux/sbitmap.h
> @@ -406,6 +406,14 @@ static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
>   	sbitmap_free(&sbq->sb);
>   }
>   
> +/**
> + * sbitmap_queue_update_wake_batch() - Recalculate wake batch.
> + * @sbq: Bitmap queue.
> + * @depth: New queue depth.
> + */
> +void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
> +				     unsigned int depth);
> +
>   /**
>    * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
>    * @sbq: Bitmap queue to resize.
> diff --git a/lib/sbitmap.c b/lib/sbitmap.c
> index b25db9be938a..bbe1d663763f 100644
> --- a/lib/sbitmap.c
> +++ b/lib/sbitmap.c
> @@ -457,7 +457,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
>   }
>   EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
>   
> -static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
> +void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
>   					    unsigned int depth)
>   {
>   	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
> @@ -475,6 +475,7 @@ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
>   			atomic_set(&sbq->ws[i].wait_cnt, 1);
>   	}
>   }
> +EXPORT_SYMBOL_GPL(sbitmap_queue_update_wake_batch);
>   
>   void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
>   {
> --
> 2.22.0
>
> .
