From: John Garry <john.garry@huawei.com>
To: Hannes Reinecke <hare@suse.de>,
	"Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Jens Axboe <axboe@kernel.dk>, Christoph Hellwig <hch@lst.de>,
	"James Bottomley" <james.bottomley@hansenpartnership.com>,
	Ming Lei <ming.lei@redhat.com>, <linux-scsi@vger.kernel.org>,
	<linux-block@vger.kernel.org>
Subject: Re: [PATCH 04/11] blk-mq: Facilitate a shared sbitmap per tagset
Date: Tue, 3 Dec 2019 14:54:30 +0000	[thread overview]
Message-ID: <ab7555b2-2e95-6fb1-2e44-fe3a323a24e4@huawei.com> (raw)
In-Reply-To: <20191202153914.84722-5-hare@suse.de>

>   
> @@ -483,8 +483,8 @@ static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
>   	res = mutex_lock_interruptible(&q->sysfs_lock);
>   	if (res)
>   		goto out;
> -	if (hctx->tags)
> -		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
> +	if (hctx->tags) /* We should just iterate the relevant bits for this hctx FIXME */

Bart's solution to this problem seemed ok, if he doesn't mind us 
borrowing his idea:

https://lore.kernel.org/linux-block/5183ab13-0c81-95f0-95ba-40318569c6c6@huawei.com/T/#m24394fe70b1ea79a154dfd9620f5e553c3e7e7da

See hctx_tags_bitmap_show().

It might also be reasonable to put that in a follow-on patch, as 
there would be no enablers of the "shared" bitmap until later patches.
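
For reference, an untested sketch of the idea - not necessarily Bart's 
exact code, see the link above for his version - would be to walk the 
driver tags and only print the bits owned by requests on this hctx, 
rather than dumping the whole shared bitmap:

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	struct blk_mq_tags *tags = hctx->tags;
	unsigned int tag;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (tags) {
		/* only report tags whose request is on this hctx */
		for (tag = 0; tag < tags->nr_tags; tag++) {
			struct request *rq = tags->rqs[tag];

			if (rq && rq->mq_hctx == hctx)
				seq_printf(m, "%u ", tag);
		}
		seq_puts(m, "\n");
	}
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}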

> +		sbitmap_bitmap_show(&hctx->tags->bitmap_tags->sb, m);
>   	mutex_unlock(&q->sysfs_lock);
>   
>   out:
> @@ -518,7 +518,7 @@ static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
>   	if (res)
>   		goto out;
>   	if (hctx->sched_tags)
> -		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
> +		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags->sb, m);
>   	mutex_unlock(&q->sysfs_lock);
>   
>   out:
> diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
> index ca22afd47b3d..1855f8f5edd4 100644
> --- a/block/blk-mq-sched.c
> +++ b/block/blk-mq-sched.c
> @@ -492,6 +492,7 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q)
>   
>   int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
>   {
> +	struct blk_mq_tag_set *tag_set = q->tag_set;
>   	struct blk_mq_hw_ctx *hctx;
>   	struct elevator_queue *eq;
>   	unsigned int i;
> @@ -537,6 +538,19 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
>   		blk_mq_debugfs_register_sched_hctx(q, hctx);
>   	}
>   
> +	if (blk_mq_is_sbitmap_shared(tag_set)) {
> +		if (!blk_mq_init_sched_shared_sbitmap(tag_set, q->nr_requests)) {
> +			ret = -ENOMEM;
> +			goto err;
> +		}
> +		queue_for_each_hw_ctx(q, hctx, i) {
> +			struct blk_mq_tags *tags = hctx->sched_tags;
> +
> +			tags->bitmap_tags = &tag_set->__sched_bitmap_tags;
> +			tags->breserved_tags = &tag_set->__sched_breserved_tags;
> +		}
> +	}
> +
>   	return 0;
>   
>   err:
> diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
> index f5009587e1b5..2e714123e846 100644
> --- a/block/blk-mq-tag.c
> +++ b/block/blk-mq-tag.c
> @@ -20,7 +20,7 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
>   	if (!tags)
>   		return true;
>   
> -	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
> +	return sbitmap_any_bit_clear(&tags->bitmap_tags->sb);
>   }
>   
>   /*
> @@ -43,9 +43,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
>    */
>   void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
>   {
> -	sbitmap_queue_wake_all(&tags->bitmap_tags);
> +	sbitmap_queue_wake_all(tags->bitmap_tags);
>   	if (include_reserve)
> -		sbitmap_queue_wake_all(&tags->breserved_tags);
> +		sbitmap_queue_wake_all(tags->breserved_tags);
>   }
>   
>   /*
> @@ -121,10 +121,10 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
>   			WARN_ON_ONCE(1);
>   			return BLK_MQ_TAG_FAIL;
>   		}
> -		bt = &tags->breserved_tags;
> +		bt = tags->breserved_tags;

We could put all of this in an earlier patch (as you had in v4, modulo 
the dynamic memory part), which would be easier to review and get accepted.

>   		tag_offset = 0;
>   	} else {
> -		bt = &tags->bitmap_tags;
> +		bt = tags->bitmap_tags;
>   		tag_offset = tags->nr_reserved_tags;
>   	}


[...]

>   	if (!set)
> @@ -3160,6 +3179,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
>   			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
>   							false);
>   		} else {
> +			sched_tags = true;
>   			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
>   							nr, true);
>   		}
> @@ -3169,8 +3189,43 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
>   			q->elevator->type->ops.depth_updated(hctx);
>   	}
>   
> -	if (!ret)
> +	/*
> +	 * if ret is 0, all queues should have been updated to the same depth
> +	 * if not, then maybe some have been updated - yuk, need to handle this for shared sbitmap...
> +	 * if some are updated, we should probably roll back the change altogether. FIXME
> +	 */

If you don't have a shared sched bitmap - which I didn't think we needed 
- then all we need here is a simple call to 
sbitmap_queue_resize(&tagset->__bitmap_tags).

Otherwise it's horrible to resize shared sched bitmaps...
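
To illustrate the simple case, something like this (untested; the 
reserved tags adjustment is just a guess) could be all that's needed in 
blk_mq_update_nr_requests():

	if (!ret) {
		if (blk_mq_is_sbitmap_shared(set))
			/* resize only the non-reserved part */
			sbitmap_queue_resize(&set->__bitmap_tags,
					     nr - set->reserved_tags);
		q->nr_requests = nr;
	}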

> +	if (!ret) {
> +		if (blk_mq_is_sbitmap_shared(set)) {
> +			if (sched_tags) {
> +				blk_mq_exit_shared_sched_sbitmap(set);
> +				if (!blk_mq_init_sched_shared_sbitmap(set, nr))
> +					return -ENOMEM; /* fixup error handling */
> +
> +				queue_for_each_hw_ctx(q, hctx, i) {
> +					hctx->sched_tags->bitmap_tags =
> +						&set->__sched_bitmap_tags;
> +					hctx->sched_tags->breserved_tags =
> +						&set->__sched_breserved_tags;
> +				}
> +			} else {
> +				blk_mq_exit_shared_sbitmap(set);
> +				if (!blk_mq_init_shared_sbitmap(set))
> +					return -ENOMEM; /* fixup error handling */
> +
> +				queue_for_each_hw_ctx(q, hctx, i) {
> +					hctx->tags->bitmap_tags =
> +						&set->__bitmap_tags;
> +					hctx->tags->breserved_tags =
> +						&set->__breserved_tags;
> +				}
> +			}
> +		}
>   		q->nr_requests = nr;
> +	}
> +	/*
> +	 * if ret != 0, q->nr_requests would not be updated, yet the depth
> +	 * for some hctx may have changed - is that right?
> +	 */
>   
>   	blk_mq_unquiesce_queue(q);
>   	blk_mq_unfreeze_queue(q);
> diff --git a/block/blk-mq.h b/block/blk-mq.h
> index 78d38b5f2793..4c1ea206d3f4 100644
> --- a/block/blk-mq.h
> +++ b/block/blk-mq.h
> @@ -166,6 +166,11 @@ struct blk_mq_alloc_data {
>   	struct blk_mq_hw_ctx *hctx;
>   };
>   
> +static inline bool blk_mq_is_sbitmap_shared(struct blk_mq_tag_set *tag_set)
> +{
> +	return !!(tag_set->flags & BLK_MQ_F_TAG_HCTX_SHARED);

Bart already gave some comments on this.

> +}
> +
>   static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
>   {
>   	if (data->flags & BLK_MQ_REQ_INTERNAL)
> diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
> index 34dcea0ef637..a7a537501d70 100644
> --- a/block/kyber-iosched.c
> +++ b/block/kyber-iosched.c
> @@ -359,7 +359,7 @@ static unsigned int kyber_sched_tags_shift(struct request_queue *q)
>   	 * All of the hardware queues have the same depth, so we can just grab
>   	 * the shift of the first one.
>   	 */
> -	return q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.shift;
> +	return q->queue_hw_ctx[0]->sched_tags->bitmap_tags->sb.shift;
>   }
>   
>   static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
> @@ -502,7 +502,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
>   	khd->batching = 0;
>   
>   	hctx->sched_data = khd;
> -	sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags,
> +	sbitmap_queue_min_shallow_depth(hctx->sched_tags->bitmap_tags,
>   					kqd->async_depth);
>   
>   	return 0;
> diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
> index 147185394a25..10c9ed3dbe80 100644
> --- a/include/linux/blk-mq.h
> +++ b/include/linux/blk-mq.h
> @@ -109,6 +109,12 @@ struct blk_mq_tag_set {
>   	unsigned int		flags;		/* BLK_MQ_F_* */
>   	void			*driver_data;
>   
> +	struct sbitmap_queue	__bitmap_tags;
> +	struct sbitmap_queue	__breserved_tags;
> +
> +	struct sbitmap_queue	__sched_bitmap_tags;
> +	struct sbitmap_queue	__sched_breserved_tags;
> +
>   	struct blk_mq_tags	**tags;
>   
>   	struct mutex		tag_list_lock;
> @@ -226,6 +232,9 @@ struct blk_mq_ops {
>   enum {
>   	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
>   	BLK_MQ_F_TAG_QUEUE_SHARED	= 1 << 1,
> +	BLK_MQ_F_TAG_HCTX_SHARED	= 1 << 2,
> +	BLK_MQ_F_TAG_BITMAP_ALLOCATED	= 1 << 3,
> +	BLK_MQ_F_TAG_SCHED_BITMAP_ALLOCATED = 1 << 4,
>   	BLK_MQ_F_BLOCKING	= 1 << 5,
>   	BLK_MQ_F_NO_SCHED	= 1 << 6,
>   	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
> 


