* [PATCHSET v2 0/5] Alloc batch fixes
@ 2021-11-04 15:21 Jens Axboe
  2021-11-04 15:22 ` [PATCH 1/5] block: have plug stored requests hold references to the queue Jens Axboe
                   ` (4 more replies)
  0 siblings, 5 replies; 9+ messages in thread
From: Jens Axboe @ 2021-11-04 15:21 UTC (permalink / raw)
  To: linux-block

Hi,

A few fixes related to the batched allocations:

- Have the requests hold a queue reference, and flush them on schedule
  unplug as well.

- Make sure the cached request's queue matches the current one; there
  could be a mismatch if we're driving multiple devices.

Since v1:

- Reshuffle series to do plug rq alloc helper before enter changes
- Protect submit_bio_checks() by queue enter reference as well

-- 
Jens Axboe




* [PATCH 1/5] block: have plug stored requests hold references to the queue
  2021-11-04 15:21 [PATCHSET v2 0/5] Alloc batch fixes Jens Axboe
@ 2021-11-04 15:22 ` Jens Axboe
  2021-11-04 15:22 ` [PATCH 2/5] block: make blk_try_enter_queue() available for blk-mq Jens Axboe
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 9+ messages in thread
From: Jens Axboe @ 2021-11-04 15:22 UTC (permalink / raw)
  To: linux-block; +Cc: Jens Axboe

Requests that were stored in the cache deliberately didn't hold an enter
reference to the queue; instead we grabbed one every time we pulled a
request out of there. That made for awkward logic on freeing the remainder
of the cached list, if needed, where we had to artificially raise the
queue usage count before each free.

Grab references up front for cached plug requests. That's safer, and also
more efficient.

Fixes: 47c122e35d7e ("block: pre-allocate requests if plug is started and is a batch")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-core.c | 2 +-
 block/blk-mq.c   | 7 ++++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index fd389a16013c..c2d267b6f910 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1643,7 +1643,7 @@ void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 		flush_plug_callbacks(plug, from_schedule);
 	if (!rq_list_empty(plug->mq_list))
 		blk_mq_flush_plug_list(plug, from_schedule);
-	if (unlikely(!from_schedule && plug->cached_rq))
+	if (unlikely(!rq_list_empty(plug->cached_rq)))
 		blk_mq_free_plug_rqs(plug);
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c68aa0a332e1..5498454c2164 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -410,7 +410,10 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
 		tag_mask &= ~(1UL << i);
 		rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
 		rq_list_add(data->cached_rq, rq);
+		nr++;
 	}
+	/* caller already holds a reference, add for remainder */
+	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
 	data->nr_tags -= nr;
 
 	return rq_list_pop(data->cached_rq);
@@ -630,10 +633,8 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug)
 {
 	struct request *rq;
 
-	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) {
-		percpu_ref_get(&rq->q->q_usage_counter);
+	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
 		blk_mq_free_request(rq);
-	}
 }
 
 static void req_bio_endio(struct request *rq, struct bio *bio,
-- 
2.33.1
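
To make the reference accounting easier to follow, here is a minimal
standalone userspace model of the scheme (plain C, not kernel code;
usage, cached, alloc_batch() and free_cached() are invented stand-ins
for q->q_usage_counter, plug->cached_rq and the batch alloc/free
paths). The submitter's enter reference covers the request it keeps,
nr - 1 extra references cover the cached ones, and the free path can
then drop one reference per request without the artificial
percpu_ref_get() the old code needed:

#include <assert.h>
#include <stdio.h>

static unsigned long usage;     /* stands in for q->q_usage_counter */
static int cached;              /* requests still sitting in plug->cached_rq */

/* models __blk_mq_alloc_requests_batch(): the caller already holds one ref */
static void alloc_batch(int nr)
{
        usage += nr - 1;        /* percpu_ref_get_many(..., nr - 1) */
        cached = nr - 1;        /* one request goes straight to the caller */
}

/* models blk_mq_free_plug_rqs() after this patch: a plain free per request */
static void free_cached(void)
{
        while (cached) {
                usage--;        /* blk_mq_free_request() drops one reference */
                cached--;
        }
}

int main(void)
{
        usage = 1;      /* reference taken when the submitter entered the queue */
        alloc_batch(4); /* caller keeps one request, three are cached */
        assert(usage == 4);     /* exactly one reference per outstanding request */

        usage--;        /* the caller's own request completes and is freed */
        free_cached();  /* flush the remainder, e.g. on schedule unplug */
        assert(usage == 0);     /* every reference dropped once, no artificial gets */

        printf("balanced: usage=%lu cached=%d\n", usage, cached);
        return 0;
}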



* [PATCH 2/5] block: make blk_try_enter_queue() available for blk-mq
  2021-11-04 15:21 [PATCHSET v2 0/5] Alloc batch fixes Jens Axboe
  2021-11-04 15:22 ` [PATCH 1/5] block: have plug stored requests hold references to the queue Jens Axboe
@ 2021-11-04 15:22 ` Jens Axboe
  2021-11-04 15:22 ` [PATCH 3/5] block: move plug rq alloc into helper Jens Axboe
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 9+ messages in thread
From: Jens Axboe @ 2021-11-04 15:22 UTC (permalink / raw)
  To: linux-block; +Cc: Jens Axboe

Just a prep patch for shifting the queue enter logic.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-core.c | 26 +-------------------------
 block/blk.h      | 25 +++++++++++++++++++++++++
 2 files changed, 26 insertions(+), 25 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index c2d267b6f910..e00f5a2287cc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -386,30 +386,6 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
-static bool blk_try_enter_queue(struct request_queue *q, bool pm)
-{
-	rcu_read_lock();
-	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
-		goto fail;
-
-	/*
-	 * The code that increments the pm_only counter must ensure that the
-	 * counter is globally visible before the queue is unfrozen.
-	 */
-	if (blk_queue_pm_only(q) &&
-	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
-		goto fail_put;
-
-	rcu_read_unlock();
-	return true;
-
-fail_put:
-	blk_queue_exit(q);
-fail:
-	rcu_read_unlock();
-	return false;
-}
-
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -442,7 +418,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 	return 0;
 }
 
-static inline int bio_queue_enter(struct bio *bio)
+int bio_queue_enter(struct bio *bio)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
diff --git a/block/blk.h b/block/blk.h
index 7afffd548daf..f7371d3b1522 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -55,6 +55,31 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 void blk_freeze_queue(struct request_queue *q);
 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
 void blk_queue_start_drain(struct request_queue *q);
+int bio_queue_enter(struct bio *bio);
+
+static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+	rcu_read_lock();
+	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
+		goto fail;
+
+	/*
+	 * The code that increments the pm_only counter must ensure that the
+	 * counter is globally visible before the queue is unfrozen.
+	 */
+	if (blk_queue_pm_only(q) &&
+	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+		goto fail_put;
+
+	rcu_read_unlock();
+	return true;
+
+fail_put:
+	blk_queue_exit(q);
+fail:
+	rcu_read_unlock();
+	return false;
+}
 
 #define BIO_INLINE_VECS 4
 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
-- 
2.33.1



* [PATCH 3/5] block: move plug rq alloc into helper
  2021-11-04 15:21 [PATCHSET v2 0/5] Alloc batch fixes Jens Axboe
  2021-11-04 15:22 ` [PATCH 1/5] block: have plug stored requests hold references to the queue Jens Axboe
  2021-11-04 15:22 ` [PATCH 2/5] block: make blk_try_enter_queue() available for blk-mq Jens Axboe
@ 2021-11-04 15:22 ` Jens Axboe
  2021-11-04 15:22 ` [PATCH 4/5] block: move queue enter logic into blk_mq_submit_bio() Jens Axboe
  2021-11-04 15:22 ` [PATCH 5/5] block: ensure cached plug request matches the current queue Jens Axboe
  4 siblings, 0 replies; 9+ messages in thread
From: Jens Axboe @ 2021-11-04 15:22 UTC (permalink / raw)
  To: linux-block; +Cc: Jens Axboe

This is in preparation for a fix, but it serves as a cleanup as well,
moving the plugged request logic out of blk_mq_submit_bio().

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-mq.c | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5498454c2164..f7f36d5ed25a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2478,6 +2478,23 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
 	return BLK_MAX_REQUEST_COUNT;
 }
 
+static inline struct request *blk_get_plug_request(struct request_queue *q,
+						   struct blk_plug *plug,
+						   struct bio *bio)
+{
+	struct request *rq;
+
+	if (!plug)
+		return NULL;
+	rq = rq_list_peek(&plug->cached_rq);
+	if (rq) {
+		plug->cached_rq = rq_list_next(rq);
+		INIT_LIST_HEAD(&rq->queuelist);
+		return rq;
+	}
+	return NULL;
+}
+
 /**
  * blk_mq_submit_bio - Create and send a request to block device.
  * @bio: Bio pointer.
@@ -2518,10 +2535,8 @@ void blk_mq_submit_bio(struct bio *bio)
 	rq_qos_throttle(q, bio);
 
 	plug = blk_mq_plug(q, bio);
-	if (plug && plug->cached_rq) {
-		rq = rq_list_pop(&plug->cached_rq);
-		INIT_LIST_HEAD(&rq->queuelist);
-	} else {
+	rq = blk_get_plug_request(q, plug, bio);
+	if (!rq) {
 		struct blk_mq_alloc_data data = {
 			.q		= q,
 			.nr_tags	= 1,
-- 
2.33.1
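
As a rough illustration of the list handling in the new helper (a
standalone sketch, not kernel code; node, plug_cache and cache_pop()
are invented names): rq_list_peek() looks at the head of
plug->cached_rq, and storing rq_list_next() back into the list head
pops that entry while leaving the rest cached.

#include <stdio.h>
#include <stddef.h>

struct node {
        int tag;
        struct node *next;      /* plays the role of the rq_list link */
};

/* models the peek-then-advance step in blk_get_plug_request() */
static struct node *cache_pop(struct node **plug_cache)
{
        struct node *n = *plug_cache;   /* rq_list_peek() */

        if (!n)
                return NULL;            /* nothing cached, allocate normally */
        *plug_cache = n->next;          /* plug->cached_rq = rq_list_next(rq) */
        return n;
}

int main(void)
{
        struct node b = { .tag = 2, .next = NULL };
        struct node a = { .tag = 1, .next = &b };
        struct node *cache = &a;
        struct node *n;

        while ((n = cache_pop(&cache)) != NULL)
                printf("popped cached request with tag %d\n", n->tag);
        return 0;
}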



* [PATCH 4/5] block: move queue enter logic into blk_mq_submit_bio()
  2021-11-04 15:21 [PATCHSET v2 0/5] Alloc batch fixes Jens Axboe
                   ` (2 preceding siblings ...)
  2021-11-04 15:22 ` [PATCH 3/5] block: move plug rq alloc into helper Jens Axboe
@ 2021-11-04 15:22 ` Jens Axboe
  2021-11-04 15:22 ` [PATCH 5/5] block: ensure cached plug request matches the current queue Jens Axboe
  4 siblings, 0 replies; 9+ messages in thread
From: Jens Axboe @ 2021-11-04 15:22 UTC (permalink / raw)
  To: linux-block; +Cc: Jens Axboe

Retain the old logic for the fops-based submit, but for our internal
blk_mq_submit_bio(), move the queue entering logic into the core
function itself.

We need to be a bit careful when going into the scheduler, as the
scheduler or queue mappings can arbitrarily change before we have
entered the queue. Have the bio scheduler merge path enter the queue
separately; it's a very cheap operation compared to actually doing the
merge locking and lookups.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-core.c     | 17 ++++++---------
 block/blk-mq-sched.c | 13 ++++++++---
 block/blk-mq.c       | 51 +++++++++++++++++++++++++++++---------------
 block/blk.h          |  1 +
 4 files changed, 52 insertions(+), 30 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index e00f5a2287cc..18aab7f8469a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -746,7 +746,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
 	return BLK_STS_OK;
 }
 
-static noinline_for_stack bool submit_bio_checks(struct bio *bio)
+noinline_for_stack bool submit_bio_checks(struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 	struct request_queue *q = bdev_get_queue(bdev);
@@ -868,18 +868,15 @@ static void __submit_bio(struct bio *bio)
 {
 	struct gendisk *disk = bio->bi_bdev->bd_disk;
 
-	if (unlikely(bio_queue_enter(bio) != 0))
-		return;
-
-	if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
-		goto queue_exit;
 	if (!disk->fops->submit_bio) {
 		blk_mq_submit_bio(bio);
-		return;
+	} else {
+		if (unlikely(bio_queue_enter(bio) != 0))
+			return;
+		if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio))
+			disk->fops->submit_bio(bio);
+		blk_queue_exit(disk->queue);
 	}
-	disk->fops->submit_bio(bio);
-queue_exit:
-	blk_queue_exit(disk->queue);
 }
 
 /*
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 4a6789e4398b..4be652fa38e7 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -370,15 +370,20 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 	bool ret = false;
 	enum hctx_type type;
 
-	if (e && e->type->ops.bio_merge)
-		return e->type->ops.bio_merge(q, bio, nr_segs);
+	if (bio_queue_enter(bio))
+		return false;
+
+	if (e && e->type->ops.bio_merge) {
+		ret = e->type->ops.bio_merge(q, bio, nr_segs);
+		goto out_put;
+	}
 
 	ctx = blk_mq_get_ctx(q);
 	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 	type = hctx->type;
 	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
 	    list_empty_careful(&ctx->rq_lists[type]))
-		return false;
+		goto out_put;
 
 	/* default per sw-queue merge */
 	spin_lock(&ctx->lock);
@@ -391,6 +396,8 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 		ret = true;
 
 	spin_unlock(&ctx->lock);
+out_put:
+	blk_queue_exit(q);
 	return ret;
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f7f36d5ed25a..b0c0eac43eef 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2487,12 +2487,21 @@ static inline struct request *blk_get_plug_request(struct request_queue *q,
 	if (!plug)
 		return NULL;
 	rq = rq_list_peek(&plug->cached_rq);
-	if (rq) {
-		plug->cached_rq = rq_list_next(rq);
-		INIT_LIST_HEAD(&rq->queuelist);
-		return rq;
-	}
-	return NULL;
+	if (!rq)
+		return NULL;
+	if (unlikely(!submit_bio_checks(bio)))
+		return ERR_PTR(-EIO);
+	plug->cached_rq = rq_list_next(rq);
+	INIT_LIST_HEAD(&rq->queuelist);
+	rq_qos_throttle(q, bio);
+	return rq;
+}
+
+static inline bool blk_mq_queue_enter(struct request_queue *q, struct bio *bio)
+{
+	if (!blk_try_enter_queue(q, false) && bio_queue_enter(bio))
+		return false;
+	return true;
 }
 
 /**
@@ -2518,31 +2527,41 @@ void blk_mq_submit_bio(struct bio *bio)
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
 
+	if (unlikely(!blk_crypto_bio_prep(&bio)))
+		return;
+
 	blk_queue_bounce(q, &bio);
 	if (blk_may_split(q, bio))
 		__blk_queue_split(q, &bio, &nr_segs);
 
 	if (!bio_integrity_prep(bio))
-		goto queue_exit;
+		return;
 
 	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
 		if (blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
-			goto queue_exit;
+			return;
 		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
-			goto queue_exit;
+			return;
 	}
 
-	rq_qos_throttle(q, bio);
-
 	plug = blk_mq_plug(q, bio);
 	rq = blk_get_plug_request(q, plug, bio);
-	if (!rq) {
+	if (IS_ERR(rq)) {
+		return;
+	} else if (!rq) {
 		struct blk_mq_alloc_data data = {
 			.q		= q,
 			.nr_tags	= 1,
 			.cmd_flags	= bio->bi_opf,
 		};
 
+		if (unlikely(!blk_mq_queue_enter(q, bio)))
+			return;
+		if (unlikely(!submit_bio_checks(bio)))
+			goto put_exit;
+
+		rq_qos_throttle(q, bio);
+
 		if (plug) {
 			data.nr_tags = plug->nr_ios;
 			plug->nr_ios = 1;
@@ -2553,7 +2572,9 @@ void blk_mq_submit_bio(struct bio *bio)
 			rq_qos_cleanup(q, bio);
 			if (bio->bi_opf & REQ_NOWAIT)
 				bio_wouldblock_error(bio);
-			goto queue_exit;
+put_exit:
+			blk_queue_exit(q);
+			return;
 		}
 	}
 
@@ -2636,10 +2657,6 @@ void blk_mq_submit_bio(struct bio *bio)
 		/* Default case. */
 		blk_mq_sched_insert_request(rq, false, true, true);
 	}
-
-	return;
-queue_exit:
-	blk_queue_exit(q);
 }
 
 static size_t order_to_size(unsigned int order)
diff --git a/block/blk.h b/block/blk.h
index f7371d3b1522..79c98ced59c8 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -56,6 +56,7 @@ void blk_freeze_queue(struct request_queue *q);
 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
 void blk_queue_start_drain(struct request_queue *q);
 int bio_queue_enter(struct bio *bio);
+bool submit_bio_checks(struct bio *bio);
 
 static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
 {
-- 
2.33.1
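
For readers following the new enter logic, below is a standalone sketch
of the idea behind blk_mq_queue_enter() above (plain C, not kernel
code; queue, fast_enter(), slow_enter() and queue_enter() are invented
names): try the cheap non-blocking fast path first and only fall back
to the full bio_queue_enter() slow path when it fails. Cached plug
requests skip both, since they already hold a reference from patch 1.

#include <stdbool.h>
#include <stdio.h>

struct queue {
        long usage;     /* stands in for q->q_usage_counter */
        bool frozen;    /* stands in for "counter is not live" */
};

/* models blk_try_enter_queue(): a cheap tryget that never blocks */
static bool fast_enter(struct queue *q)
{
        if (q->frozen)
                return false;
        q->usage++;
        return true;
}

/* models bio_queue_enter(): the slow path, which may wait or fail the bio */
static bool slow_enter(struct queue *q)
{
        if (q->frozen)
                return false;   /* the real code may sleep until unfrozen */
        q->usage++;
        return true;
}

/* models blk_mq_queue_enter() from the hunk above */
static bool queue_enter(struct queue *q)
{
        if (fast_enter(q))
                return true;
        return slow_enter(q);
}

int main(void)
{
        struct queue q = { .usage = 0, .frozen = false };

        if (queue_enter(&q))
                printf("entered the queue, usage=%ld\n", q.usage);
        return 0;
}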



* [PATCH 5/5] block: ensure cached plug request matches the current queue
  2021-11-04 15:21 [PATCHSET v2 0/5] Alloc batch fixes Jens Axboe
                   ` (3 preceding siblings ...)
  2021-11-04 15:22 ` [PATCH 4/5] block: move queue enter logic into blk_mq_submit_bio() Jens Axboe
@ 2021-11-04 15:22 ` Jens Axboe
  4 siblings, 0 replies; 9+ messages in thread
From: Jens Axboe @ 2021-11-04 15:22 UTC (permalink / raw)
  To: linux-block; +Cc: Jens Axboe

If we're driving multiple devices, we could have pre-populated the cache
for a different device. Ensure that the cached request matches the
current queue.

Fixes: 47c122e35d7e ("block: pre-allocate requests if plug is started and is a batch")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-mq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index b0c0eac43eef..e9397bcdd90c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2487,7 +2487,7 @@ static inline struct request *blk_get_plug_request(struct request_queue *q,
 	if (!plug)
 		return NULL;
 	rq = rq_list_peek(&plug->cached_rq);
-	if (!rq)
+	if (!rq || rq->q != q)
 		return NULL;
 	if (unlikely(!submit_bio_checks(bio)))
 		return ERR_PTR(-EIO);
-- 
2.33.1
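
A tiny standalone illustration of the guard (not kernel code;
queue_like, request_like and cache_match() are invented names): if the
cached entry was pre-allocated for a different queue, the helper
reports the cache as empty so the caller falls back to allocating a
fresh request against the queue at hand.

#include <stdio.h>
#include <stddef.h>

struct queue_like { int id; };
struct request_like { struct queue_like *q; };

/* models the "!rq || rq->q != q" check in blk_get_plug_request() */
static struct request_like *cache_match(struct request_like *cached,
                                        struct queue_like *q)
{
        if (!cached || cached->q != q)
                return NULL;    /* mismatch or empty: allocate normally */
        return cached;
}

int main(void)
{
        struct queue_like disk_a = { .id = 0 }, disk_b = { .id = 1 };
        struct request_like rq = { .q = &disk_a };

        printf("same queue:  %s\n",
               cache_match(&rq, &disk_a) ? "use cached" : "allocate fresh");
        printf("other queue: %s\n",
               cache_match(&rq, &disk_b) ? "use cached" : "allocate fresh");
        return 0;
}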



* Re: [PATCH 1/5] block: have plug stored requests hold references to the queue
  2021-11-04 18:34   ` Christoph Hellwig
@ 2021-11-04 18:35     ` Jens Axboe
  0 siblings, 0 replies; 9+ messages in thread
From: Jens Axboe @ 2021-11-04 18:35 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-block

On 11/4/21 12:34 PM, Christoph Hellwig wrote:
> On Thu, Nov 04, 2021 at 12:21:57PM -0600, Jens Axboe wrote:
>> Requests that were stored in the cache deliberately didn't hold an enter
>> reference to the queue; instead we grabbed one every time we pulled a
>> request out of there. That made for awkward logic on freeing the remainder
>> of the cached list, if needed, where we had to artificially raise the
>> queue usage count before each free.
>>
>> Grab references up front for cached plug requests. That's safer, and also
>> more efficient.
> 
> I think it would be useful to add your explanation of why the cached
> requests should now be flushed at schedule time here.
> 
> Otherwise this looks good:
> 
> Reviewed-by: Christoph Hellwig <hch@lst.de>

Thanks, I'll add a comment when amending with your reviewed-by.

-- 
Jens Axboe



* Re: [PATCH 1/5] block: have plug stored requests hold references to the queue
  2021-11-04 18:21 ` [PATCH 1/5] block: have plug stored requests hold references to the queue Jens Axboe
@ 2021-11-04 18:34   ` Christoph Hellwig
  2021-11-04 18:35     ` Jens Axboe
  0 siblings, 1 reply; 9+ messages in thread
From: Christoph Hellwig @ 2021-11-04 18:34 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, hch

On Thu, Nov 04, 2021 at 12:21:57PM -0600, Jens Axboe wrote:
> Requests that were stored in the cache deliberately didn't hold an enter
> reference to the queue; instead we grabbed one every time we pulled a
> request out of there. That made for awkward logic on freeing the remainder
> of the cached list, if needed, where we had to artificially raise the
> queue usage count before each free.
> 
> Grab references up front for cached plug requests. That's safer, and also
> more efficient.

I think it would be useful to add your explanation of why the cached
requests should now be flushed at schedule time here.

Otherwise this looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>


* [PATCH 1/5] block: have plug stored requests hold references to the queue
  2021-11-04 18:21 [PATCHSET v3 0/5] Alloc batch fixes Jens Axboe
@ 2021-11-04 18:21 ` Jens Axboe
  2021-11-04 18:34   ` Christoph Hellwig
  0 siblings, 1 reply; 9+ messages in thread
From: Jens Axboe @ 2021-11-04 18:21 UTC (permalink / raw)
  To: linux-block; +Cc: hch, Jens Axboe

Requests that were stored in the cache deliberately didn't hold an enter
reference to the queue; instead we grabbed one every time we pulled a
request out of there. That made for awkward logic on freeing the remainder
of the cached list, if needed, where we had to artificially raise the
queue usage count before each free.

Grab references up front for cached plug requests. That's safer, and also
more efficient.

Fixes: 47c122e35d7e ("block: pre-allocate requests if plug is started and is a batch")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-core.c | 2 +-
 block/blk-mq.c   | 7 ++++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index fd389a16013c..c2d267b6f910 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1643,7 +1643,7 @@ void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 		flush_plug_callbacks(plug, from_schedule);
 	if (!rq_list_empty(plug->mq_list))
 		blk_mq_flush_plug_list(plug, from_schedule);
-	if (unlikely(!from_schedule && plug->cached_rq))
+	if (unlikely(!rq_list_empty(plug->cached_rq)))
 		blk_mq_free_plug_rqs(plug);
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c68aa0a332e1..5498454c2164 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -410,7 +410,10 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
 		tag_mask &= ~(1UL << i);
 		rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
 		rq_list_add(data->cached_rq, rq);
+		nr++;
 	}
+	/* caller already holds a reference, add for remainder */
+	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
 	data->nr_tags -= nr;
 
 	return rq_list_pop(data->cached_rq);
@@ -630,10 +633,8 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug)
 {
 	struct request *rq;
 
-	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) {
-		percpu_ref_get(&rq->q->q_usage_counter);
+	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
 		blk_mq_free_request(rq);
-	}
 }
 
 static void req_bio_endio(struct request *rq, struct bio *bio,
-- 
2.33.1


