* two small blk-mq cleanups
@ 2021-10-12 10:40 Christoph Hellwig
  2021-10-12 10:40 ` [PATCH 1/2] blk-mq: cleanup and rename __blk_mq_alloc_request Christoph Hellwig
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Christoph Hellwig @ 2021-10-12 10:40 UTC (permalink / raw)
  To: axboe; +Cc: linux-block

Hi Jens,

this series cleans up a bit of the code that was recently touched for
batched request allocations.
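
For context, the batching these patches touch works by letting the
allocator grab several tags in one go and parking the spare requests on
the submitter's plug, chained through rq->rq_next, so that later bios
submitted under the same plug can pop a request without going back to
the tag allocator.  A minimal sketch of the consumer side, pieced
together from the hunks in patch 2 (field names as in the patches, not a
complete listing):

	/* sketch: reuse a request cached on the plug, if one is there */
	if (plug && plug->cached_rq) {
		rq = plug->cached_rq;
		plug->cached_rq = rq->rq_next;
		INIT_LIST_HEAD(&rq->queuelist);
	}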


* [PATCH 1/2] blk-mq: cleanup and rename __blk_mq_alloc_request
  2021-10-12 10:40 two small blk-mq cleanups Christoph Hellwig
@ 2021-10-12 10:40 ` Christoph Hellwig
  2021-10-12 10:40 ` [PATCH 2/2] blk-mq: cleanup blk_mq_submit_bio Christoph Hellwig
  2021-10-12 14:54 ` two small blk-mq cleanups Jens Axboe
  2 siblings, 0 replies; 4+ messages in thread
From: Christoph Hellwig @ 2021-10-12 10:40 UTC (permalink / raw)
  To: axboe; +Cc: linux-block

The newly added loop for the cached requests in __blk_mq_alloc_request
is a little too convoluted for my taste, so unwind it a bit.  Also
rename the function to __blk_mq_alloc_requests now that it can allocate
more than a single request.
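
For reference, the unwound tag loop ends up reading roughly like this
(a condensed sketch assembled from the hunk below; the retry label and
the setup above it are omitted):

	do {
		tag = blk_mq_get_tag(data);
		if (tag == BLK_MQ_NO_TAG) {
			if (data->flags & BLK_MQ_REQ_NOWAIT)
				break;
			/* give RT tasks a chance to migrate off a dying hctx */
			msleep(3);
			goto retry;
		}

		rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
		if (!--data->nr_tags || e ||
		    (data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
			return rq;

		/* stash the extra request on the caller's cache list */
		rq->rq_next = *data->cached_rq;
		*data->cached_rq = rq;
		data->flags |= BLK_MQ_REQ_NOWAIT;
	} while (1);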

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 56 +++++++++++++++++++++++++-------------------------
 1 file changed, 28 insertions(+), 28 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index ced94eb8e2979..3fe3350616f13 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -347,7 +347,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	return rq;
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
+static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 {
 	struct request_queue *q = data->q;
 	struct elevator_queue *e = q->elevator;
@@ -388,36 +388,36 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 	 */
 	do {
 		tag = blk_mq_get_tag(data);
-		if (tag != BLK_MQ_NO_TAG) {
-			rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
-			if (!--data->nr_tags)
-				return rq;
-			if (e || data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
-				return rq;
-			rq->rq_next = *data->cached_rq;
-			*data->cached_rq = rq;
-			data->flags |= BLK_MQ_REQ_NOWAIT;
-			continue;
+		if (tag == BLK_MQ_NO_TAG) {
+			if (data->flags & BLK_MQ_REQ_NOWAIT)
+				break;
+			/*
+			 * Give up the CPU and sleep for a random short time to
+			 * ensure that threads using a realtime scheduling class
+			 * are migrated off the CPU, and thus off the hctx that
+			 * is going away.
+			 */
+			msleep(3);
+			goto retry;
 		}
-		if (data->flags & BLK_MQ_REQ_NOWAIT)
-			break;
 
-		/*
-		 * Give up the CPU and sleep for a random short time to ensure
-		 * that thread using a realtime scheduling class are migrated
-		 * off the CPU, and thus off the hctx that is going away.
-		 */
-		msleep(3);
-		goto retry;
+		rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
+		if (!--data->nr_tags || e ||
+		    (data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
+			return rq;
+
+		/* link into the cached list */
+		rq->rq_next = *data->cached_rq;
+		*data->cached_rq = rq;
+		data->flags |= BLK_MQ_REQ_NOWAIT;
 	} while (1);
 
-	if (data->cached_rq) {
-		rq = *data->cached_rq;
-		*data->cached_rq = rq->rq_next;
-		return rq;
-	}
+	if (!data->cached_rq)
+		return NULL;
 
-	return NULL;
+	rq = *data->cached_rq;
+	*data->cached_rq = rq->rq_next;
+	return rq;
 }
 
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
@@ -436,7 +436,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 	if (ret)
 		return ERR_PTR(ret);
 
-	rq = __blk_mq_alloc_request(&data);
+	rq = __blk_mq_alloc_requests(&data);
 	if (!rq)
 		goto out_queue_exit;
 	rq->__data_len = 0;
@@ -2251,7 +2251,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 			plug->nr_ios = 1;
 			data.cached_rq = &plug->cached_rq;
 		}
-		rq = __blk_mq_alloc_request(&data);
+		rq = __blk_mq_alloc_requests(&data);
 		if (unlikely(!rq)) {
 			rq_qos_cleanup(q, bio);
 			if (bio->bi_opf & REQ_NOWAIT)
-- 
2.30.2



* [PATCH 2/2] blk-mq: cleanup blk_mq_submit_bio
  2021-10-12 10:40 two small blk-mq cleanups Christoph Hellwig
  2021-10-12 10:40 ` [PATCH 1/2] blk-mq: cleanup and rename __blk_mq_alloc_request Christoph Hellwig
@ 2021-10-12 10:40 ` Christoph Hellwig
  2021-10-12 14:54 ` two small blk-mq cleanups Jens Axboe
  2 siblings, 0 replies; 4+ messages in thread
From: Christoph Hellwig @ 2021-10-12 10:40 UTC (permalink / raw)
  To: axboe; +Cc: linux-block

Move the blk_mq_alloc_data stack allocation only into the branch
that actually needs it, and use rq->mq_hctx instead of data.hctx
to refer to the hctx.
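
With that, the allocation-only state is confined to the slow path that
actually allocates a request; condensed from the hunks below (error
handling left out), the shape is roughly:

	} else {
		struct blk_mq_alloc_data data = {
			.q		= q,
			.nr_tags	= 1,
			.cmd_flags	= bio->bi_opf,
		};

		if (plug) {
			data.nr_tags = plug->nr_ios;
			plug->nr_ios = 1;
			data.cached_rq = &plug->cached_rq;
		}
		rq = __blk_mq_alloc_requests(&data);
	}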

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3fe3350616f13..38e6651d8b94c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2209,10 +2209,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = op_is_flush(bio->bi_opf);
-	struct blk_mq_alloc_data data = {
-		.q		= q,
-		.nr_tags	= 1,
-	};
 	struct request *rq;
 	struct blk_plug *plug;
 	struct request *same_queue_rq = NULL;
@@ -2243,9 +2239,13 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 		rq = plug->cached_rq;
 		plug->cached_rq = rq->rq_next;
 		INIT_LIST_HEAD(&rq->queuelist);
-		data.hctx = rq->mq_hctx;
 	} else {
-		data.cmd_flags = bio->bi_opf;
+		struct blk_mq_alloc_data data = {
+			.q		= q,
+			.nr_tags	= 1,
+			.cmd_flags	= bio->bi_opf,
+		};
+
 		if (plug) {
 			data.nr_tags = plug->nr_ios;
 			plug->nr_ios = 1;
@@ -2264,7 +2264,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 
 	rq_qos_track(q, rq, bio);
 
-	cookie = request_to_qc_t(data.hctx, rq);
+	cookie = request_to_qc_t(rq->mq_hctx, rq);
 
 	blk_mq_bio_to_request(rq, bio, nr_segs);
 
@@ -2279,7 +2279,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 	if (unlikely(is_flush_fua)) {
 		/* Bypass scheduler for flush requests */
 		blk_insert_flush(rq);
-		blk_mq_run_hw_queue(data.hctx, true);
+		blk_mq_run_hw_queue(rq->mq_hctx, true);
 	} else if (plug && (q->nr_hw_queues == 1 ||
 		   blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
 		   q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
@@ -2326,18 +2326,17 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 		trace_block_plug(q);
 
 		if (same_queue_rq) {
-			data.hctx = same_queue_rq->mq_hctx;
 			trace_block_unplug(q, 1, true);
-			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
-					&cookie);
+			blk_mq_try_issue_directly(same_queue_rq->mq_hctx,
+						  same_queue_rq, &cookie);
 		}
 	} else if ((q->nr_hw_queues > 1 && is_sync) ||
-			!data.hctx->dispatch_busy) {
+		   !rq->mq_hctx->dispatch_busy) {
 		/*
 		 * There is no scheduler and we can try to send directly
 		 * to the hardware.
 		 */
-		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+		blk_mq_try_issue_directly(rq->mq_hctx, rq, &cookie);
 	} else {
 		/* Default case. */
 		blk_mq_sched_insert_request(rq, false, true, true);
-- 
2.30.2



* Re: two small blk-mq cleanups
  2021-10-12 10:40 two small blk-mq cleanups Christoph Hellwig
  2021-10-12 10:40 ` [PATCH 1/2] blk-mq: cleanup and rename __blk_mq_alloc_request Christoph Hellwig
  2021-10-12 10:40 ` [PATCH 2/2] blk-mq: cleanup blk_mq_submit_bio Christoph Hellwig
@ 2021-10-12 14:54 ` Jens Axboe
  2 siblings, 0 replies; 4+ messages in thread
From: Jens Axboe @ 2021-10-12 14:54 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-block

On 10/12/21 4:40 AM, Christoph Hellwig wrote:
> Hi Jens,
> 
> this series cleans up a bit of the code that was recently touched for
> batched request allocations.

Thanks, applied. I've got some further batch improvements and cleanups,
but I can do those on top of this. I'll send those out today.

-- 
Jens Axboe


