* [PATCHSET v2] Improve plugging
@ 2021-10-19 12:08 Jens Axboe
  2021-10-19 12:08 ` [PATCH 1/2] block: change plugging to use a singly linked list Jens Axboe
  2021-10-19 12:08 ` [PATCH 2/2] block: attempt direct issue of plug list Jens Axboe
  0 siblings, 2 replies; 12+ messages in thread
From: Jens Axboe @ 2021-10-19 12:08 UTC (permalink / raw)
  To: linux-block; +Cc: hch

Hi,

Split this into two patches - one that implements the singly linked
list as directly as possible, and one that adds direct issue of the
plug list. The sum of the two patches is the same, but they look
nicer split like this.

-- 
Jens Axboe



^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 1/2] block: change plugging to use a singly linked list
  2021-10-19 12:08 [PATCHSET v2] Improve plugging Jens Axboe
@ 2021-10-19 12:08 ` Jens Axboe
  2021-10-19 13:34   ` Christoph Hellwig
  2021-10-19 12:08 ` [PATCH 2/2] block: attempt direct issue of plug list Jens Axboe
  1 sibling, 1 reply; 12+ messages in thread
From: Jens Axboe @ 2021-10-19 12:08 UTC (permalink / raw)
  To: linux-block; +Cc: hch, Jens Axboe

Use a singly linked list for the blk_plug. This saves 8 bytes in the
blk_plug struct, and makes for faster list manipulations than a doubly
linked list. As nothing relies on the doubly linked semantics here, a
singly linked list is just fine.
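
For reference, a minimal sketch of the semantics the rq_list helpers used
below rely on (illustrative only; the in-tree helpers are macros and differ
in detail):

	/* push at the head: O(1), so the list is effectively LIFO */
	static inline void rq_list_add(struct request **list, struct request *rq)
	{
		rq->rq_next = *list;
		*list = rq;
	}

	/* pop the most recently added request, or NULL if the list is empty */
	static inline struct request *rq_list_pop(struct request **list)
	{
		struct request *rq = *list;

		if (rq)
			*list = rq->rq_next;
		return rq;
	}

	static inline struct request *rq_list_peek(struct request **list)
	{
		return *list;
	}

	static inline bool rq_list_empty(struct request *list)
	{
		return list == NULL;
	}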

This yields a bump in default (merging enabled) performance from 7.0
to 7.1M IOPS, and ~7.5M IOPS with merging disabled.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-core.c       |  4 +--
 block/blk-merge.c      |  4 +--
 block/blk-mq.c         | 80 ++++++++++++++++++++++++------------------
 include/linux/blkdev.h |  5 ++-
 4 files changed, 51 insertions(+), 42 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index d0c2e11411d0..14d20909f61a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1550,7 +1550,7 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
 	if (tsk->plug)
 		return;
 
-	INIT_LIST_HEAD(&plug->mq_list);
+	plug->mq_list = NULL;
 	plug->cached_rq = NULL;
 	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
 	plug->rq_count = 0;
@@ -1640,7 +1640,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	flush_plug_callbacks(plug, from_schedule);
 
-	if (!list_empty(&plug->mq_list))
+	if (!rq_list_empty(plug->mq_list))
 		blk_mq_flush_plug_list(plug, from_schedule);
 	if (unlikely(!from_schedule && plug->cached_rq))
 		blk_mq_free_plug_rqs(plug);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index c273b58378ce..3e6fa449caff 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -1090,11 +1090,11 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 	struct request *rq;
 
 	plug = blk_mq_plug(q, bio);
-	if (!plug || list_empty(&plug->mq_list))
+	if (!plug || rq_list_empty(plug->mq_list))
 		return false;
 
 	/* check the previously added entry for a quick merge attempt */
-	rq = list_last_entry(&plug->mq_list, struct request, queuelist);
+	rq = rq_list_peek(&plug->mq_list);
 	if (rq->q == q) {
 		/*
 		 * Only blk-mq multiple hardware queues case checks the rq in
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 82d8ad837057..620233b85af2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2149,34 +2149,46 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
+	struct blk_mq_hw_ctx *this_hctx;
+	struct blk_mq_ctx *this_ctx;
+	unsigned int depth;
 	LIST_HEAD(list);
 
-	if (list_empty(&plug->mq_list))
+	if (rq_list_empty(plug->mq_list))
 		return;
-	list_splice_init(&plug->mq_list, &list);
 	plug->rq_count = 0;
 
+	this_hctx = NULL;
+	this_ctx = NULL;
+	depth = 0;
 	do {
-		struct list_head rq_list;
-		struct request *rq, *head_rq = list_entry_rq(list.next);
-		struct list_head *pos = &head_rq->queuelist; /* skip first */
-		struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx;
-		struct blk_mq_ctx *this_ctx = head_rq->mq_ctx;
-		unsigned int depth = 1;
-
-		list_for_each_continue(pos, &list) {
-			rq = list_entry_rq(pos);
-			BUG_ON(!rq->q);
-			if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx)
-				break;
-			depth++;
+		struct request *rq;
+
+		rq = rq_list_pop(&plug->mq_list);
+
+		if (!this_hctx) {
+			this_hctx = rq->mq_hctx;
+			this_ctx = rq->mq_ctx;
+		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
+			trace_block_unplug(this_hctx->queue, depth,
+						!from_schedule);
+			blk_mq_sched_insert_requests(this_hctx, this_ctx,
+						&list, from_schedule);
+			depth = 0;
+			this_hctx = rq->mq_hctx;
+			this_ctx = rq->mq_ctx;
+
 		}
 
-		list_cut_before(&rq_list, &list, pos);
-		trace_block_unplug(head_rq->q, depth, !from_schedule);
-		blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
+		list_add(&rq->queuelist, &list);
+		depth++;
+	} while (!rq_list_empty(plug->mq_list));
+
+	if (!list_empty(&list)) {
+		trace_block_unplug(this_hctx->queue, depth, !from_schedule);
+		blk_mq_sched_insert_requests(this_hctx, this_ctx, &list,
 						from_schedule);
-	} while(!list_empty(&list));
+	}
 }
 
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
@@ -2356,16 +2368,15 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 
 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 {
-	list_add_tail(&rq->queuelist, &plug->mq_list);
-	plug->rq_count++;
-	if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
-		struct request *tmp;
+	if (!plug->multiple_queues) {
+		struct request *nxt = rq_list_peek(&plug->mq_list);
 
-		tmp = list_first_entry(&plug->mq_list, struct request,
-						queuelist);
-		if (tmp->q != rq->q)
+		if (nxt && nxt->q != rq->q)
 			plug->multiple_queues = true;
 	}
+	rq->rq_next = NULL;
+	rq_list_add(&plug->mq_list, rq);
+	plug->rq_count++;
 }
 
 /*
@@ -2477,13 +2488,15 @@ void blk_mq_submit_bio(struct bio *bio)
 		unsigned int request_count = plug->rq_count;
 		struct request *last = NULL;
 
-		if (!request_count)
+		if (!request_count) {
 			trace_block_plug(q);
-		else
-			last = list_entry_rq(plug->mq_list.prev);
+		} else if (!blk_queue_nomerges(q)) {
+			last = rq_list_peek(&plug->mq_list);
+			if (blk_rq_bytes(last) < BLK_PLUG_FLUSH_SIZE)
+				last = NULL;
+		}
 
-		if (request_count >= blk_plug_max_rq_count(plug) || (last &&
-		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
+		if (request_count >= blk_plug_max_rq_count(plug) || last) {
 			blk_flush_plug_list(plug, false);
 			trace_block_plug(q);
 		}
@@ -2503,10 +2516,7 @@ void blk_mq_submit_bio(struct bio *bio)
 		 * the plug list is empty, and same_queue_rq is invalid.
 		 */
 		if (same_queue_rq) {
-			next_rq = list_last_entry(&plug->mq_list,
-							struct request,
-							queuelist);
-			list_del_init(&next_rq->queuelist);
+			next_rq = rq_list_pop(&plug->mq_list);
 			plug->rq_count--;
 		}
 		blk_add_rq_to_plug(plug, rq);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index abe721591e80..80668e316eea 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -711,7 +711,7 @@ extern void blk_set_queue_dying(struct request_queue *);
  * schedule() where blk_schedule_flush_plug() is called.
  */
 struct blk_plug {
-	struct list_head mq_list; /* blk-mq requests */
+	struct request *mq_list; /* blk-mq requests */
 
 	/* if ios_left is > 1, we can batch tag/rq allocations */
 	struct request *cached_rq;
@@ -760,8 +760,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 	struct blk_plug *plug = tsk->plug;
 
 	return plug &&
-		 (!list_empty(&plug->mq_list) ||
-		 !list_empty(&plug->cb_list));
+		 (plug->mq_list || !list_empty(&plug->cb_list));
 }
 
 int blkdev_issue_flush(struct block_device *bdev);
-- 
2.33.1


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 2/2] block: attempt direct issue of plug list
  2021-10-19 12:08 [PATCHSET v2] Improve plugging Jens Axboe
  2021-10-19 12:08 ` [PATCH 1/2] block: change plugging to use a singly linked list Jens Axboe
@ 2021-10-19 12:08 ` Jens Axboe
  2021-10-19 13:36   ` Christoph Hellwig
                     ` (2 more replies)
  1 sibling, 3 replies; 12+ messages in thread
From: Jens Axboe @ 2021-10-19 12:08 UTC (permalink / raw)
  To: linux-block; +Cc: hch, Jens Axboe

If we have just one queue type in the plug list, then we can extend our
direct issue to cover a full plug list as well.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-core.c       |  1 +
 block/blk-mq.c         | 60 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/blkdev.h |  1 +
 3 files changed, 62 insertions(+)

diff --git a/block/blk-core.c b/block/blk-core.c
index 14d20909f61a..e6ad5b51d0c3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1555,6 +1555,7 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
 	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
 	plug->rq_count = 0;
 	plug->multiple_queues = false;
+	plug->has_elevator = false;
 	plug->nowait = false;
 	INIT_LIST_HEAD(&plug->cb_list);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 620233b85af2..d0fe86b46d1b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2147,6 +2147,58 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	spin_unlock(&ctx->lock);
 }
 
+static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
+			      bool from_schedule)
+{
+	if (hctx->queue->mq_ops->commit_rqs) {
+		trace_block_unplug(hctx->queue, *queued, !from_schedule);
+		hctx->queue->mq_ops->commit_rqs(hctx);
+	}
+	*queued = 0;
+}
+
+static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
+{
+	struct blk_mq_hw_ctx *hctx = NULL;
+	struct request *rq;
+	int queued = 0;
+	int errors = 0;
+
+	while ((rq = rq_list_pop(&plug->mq_list))) {
+		bool last = rq_list_empty(plug->mq_list);
+		blk_status_t ret;
+
+		if (hctx != rq->mq_hctx) {
+			if (hctx)
+				blk_mq_commit_rqs(hctx, &queued, from_schedule);
+			hctx = rq->mq_hctx;
+		}
+
+		ret = blk_mq_request_issue_directly(rq, last);
+		switch (ret) {
+		case BLK_STS_OK:
+			queued++;
+			break;
+		case BLK_STS_RESOURCE:
+		case BLK_STS_DEV_RESOURCE:
+			blk_mq_request_bypass_insert(rq, false, last);
+			blk_mq_commit_rqs(hctx, &queued, from_schedule);
+			return;
+		default:
+			blk_mq_end_request(rq, ret);
+			errors++;
+			break;
+		}
+	}
+
+	/*
+	 * If we didn't flush the entire list, we could have told the driver
+	 * there was more coming, but that turned out to be a lie.
+	 */
+	if (errors)
+		blk_mq_commit_rqs(hctx, &queued, from_schedule);
+}
+
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct blk_mq_hw_ctx *this_hctx;
@@ -2158,6 +2210,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		return;
 	plug->rq_count = 0;
 
+	if (!plug->multiple_queues && !plug->has_elevator) {
+		blk_mq_plug_issue_direct(plug, from_schedule);
+		if (rq_list_empty(plug->mq_list))
+			return;
+	}
+
 	this_hctx = NULL;
 	this_ctx = NULL;
 	depth = 0;
@@ -2374,6 +2432,8 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 		if (nxt && nxt->q != rq->q)
 			plug->multiple_queues = true;
 	}
+	if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
+		plug->has_elevator = true;
 	rq->rq_next = NULL;
 	rq_list_add(&plug->mq_list, rq);
 	plug->rq_count++;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 80668e316eea..2e93682f8f68 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -720,6 +720,7 @@ struct blk_plug {
 	unsigned short rq_count;
 
 	bool multiple_queues;
+	bool has_elevator;
 	bool nowait;
 
 	struct list_head cb_list; /* md requires an unplug callback */
-- 
2.33.1


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/2] block: change plugging to use a singly linked list
  2021-10-19 12:08 ` [PATCH 1/2] block: change plugging to use a singly linked list Jens Axboe
@ 2021-10-19 13:34   ` Christoph Hellwig
  2021-10-19 13:45     ` Jens Axboe
  0 siblings, 1 reply; 12+ messages in thread
From: Christoph Hellwig @ 2021-10-19 13:34 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, hch

On Tue, Oct 19, 2021 at 06:08:33AM -0600, Jens Axboe wrote:
> +						!from_schedule);
> +			blk_mq_sched_insert_requests(this_hctx, this_ctx,
> +						&list, from_schedule);
> +			depth = 0;
> +			this_hctx = rq->mq_hctx;
> +			this_ctx = rq->mq_ctx;
> +
>  		}
>  
> +		list_add(&rq->queuelist, &list);

I think this needs to be a list_add_tail to keep the request ordered.

Otherwise looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] block: attempt direct issue of plug list
  2021-10-19 12:08 ` [PATCH 2/2] block: attempt direct issue of plug list Jens Axboe
@ 2021-10-19 13:36   ` Christoph Hellwig
  2021-10-19 13:45     ` Jens Axboe
  2021-10-26  5:20   ` Shinichiro Kawasaki
  2021-10-27  1:13   ` Guenter Roeck
  2 siblings, 1 reply; 12+ messages in thread
From: Christoph Hellwig @ 2021-10-19 13:36 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, hch

On Tue, Oct 19, 2021 at 06:08:34AM -0600, Jens Axboe wrote:
> If we have just one queue type in the plug list, then we can extend our
> direct issue to cover a full plug list as well.

I don't think this description matches what the code does.  My impression
of what the code does is:

If a plug only has requests for a single queue, and that queue does not
use an I/O scheduler, we can just issue the list of requests directly
from the plug.

Otherwise looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/2] block: change plugging to use a singly linked list
  2021-10-19 13:34   ` Christoph Hellwig
@ 2021-10-19 13:45     ` Jens Axboe
  0 siblings, 0 replies; 12+ messages in thread
From: Jens Axboe @ 2021-10-19 13:45 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-block

On 10/19/21 7:34 AM, Christoph Hellwig wrote:
> On Tue, Oct 19, 2021 at 06:08:33AM -0600, Jens Axboe wrote:
>> +						!from_schedule);
>> +			blk_mq_sched_insert_requests(this_hctx, this_ctx,
>> +						&list, from_schedule);
>> +			depth = 0;
>> +			this_hctx = rq->mq_hctx;
>> +			this_ctx = rq->mq_ctx;
>> +
>>  		}
>>  
>> +		list_add(&rq->queuelist, &list);
> 
> I think this needs to be a list_add_tail to keep the request ordered.

I think it does the right thing: the singly linked list is LIFO, so we
effectively just reverse it here.
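
For example: requests plugged in the order a, b, c sit on the singly
linked list as c -> b -> a. rq_list_pop() then yields c, then b, then a,
and since list_add() inserts at the head of 'list', the doubly linked
list ends up as a -> b -> c, i.e. the original submission order.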

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] block: attempt direct issue of plug list
  2021-10-19 13:36   ` Christoph Hellwig
@ 2021-10-19 13:45     ` Jens Axboe
  0 siblings, 0 replies; 12+ messages in thread
From: Jens Axboe @ 2021-10-19 13:45 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-block

On 10/19/21 7:36 AM, Christoph Hellwig wrote:
> On Tue, Oct 19, 2021 at 06:08:34AM -0600, Jens Axboe wrote:
>> If we have just one queue type in the plug list, then we can extend our
>> direct issue to cover a full plug list as well.
> 
> I don't think this description matches what the code does.  My impression
> of what the code does is:
> 
> If a plug only has requests for a single queue, and that queue does not
> use an I/O scheduler, we can just issue the list of requests directly
> from the plug.
> 
> Otherwise looks good:
> 
> Reviewed-by: Christoph Hellwig <hch@lst.de>

I'll expand it a bit - that is what I was trying to say, but it just
ended up too brief.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] block: attempt direct issue of plug list
  2021-10-19 12:08 ` [PATCH 2/2] block: attempt direct issue of plug list Jens Axboe
  2021-10-19 13:36   ` Christoph Hellwig
@ 2021-10-26  5:20   ` Shinichiro Kawasaki
  2021-10-26 14:42     ` Jens Axboe
  2021-10-27  1:13   ` Guenter Roeck
  2 siblings, 1 reply; 12+ messages in thread
From: Shinichiro Kawasaki @ 2021-10-26  5:20 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, hch, Damien Le Moal

On Oct 19, 2021 / 06:08, Jens Axboe wrote:
> If we have just one queue type in the plug list, then we can extend our
> direct issue to cover a full plug list as well.
> 
> Signed-off-by: Jens Axboe <axboe@kernel.dk>

Hi Jens, I tried out the for-next branch and observed a WARNING, "do not call
blocking ops when !TASK_RUNNING" [1]. Reverting this patch from the for-next
branch makes the warning disappear. The warning was triggered when mkfs.xfs
was run on a memory-backed null_blk device with the "none" scheduler. The
commands below recreate it.

# modprobe null_blk nr_devices=0
# mkdir /sys/kernel/config/nullb/nullb0
# declare sysfs=/sys/kernel/config/nullb/nullb0
# echo 1 > "${sysfs}"/memory_backed
# echo 1 > "${sysfs}"/power
# echo none > /sys/block/nullb0/queue/scheduler
# mkfs.xfs /dev/nullb0


Referring to the call stack printed, I walked through the function calls.
In __blkdev_direct_IO_simple(), the task state is set to TASK_UNINTERRUPTIBLE.
Further down, might_sleep_if() called from null_queue_rq() warns that the
task state is not TASK_RUNNING. This patch adds the blk_mq_plug_issue_direct()
call in blk_mq_flush_plug_list(), which links the call path from
__blkdev_direct_IO_simple() to null_queue_rq().

__blkdev_direct_IO_simple() block/fops.c
  set_current_state(TASK_UNINTERRUPTIBLE) ... current->__state = TASK_UNINTERRUPTIBLE
  blk_io_schedule()
    io_schedule_timeout() kernel/sched/core.c
      io_schedule_prepare()
        blk_schedule_flush_plug() include/linux/blkdev.h
          blk_flush_plug_list() block/blk-core.c
            blk_mq_flush_plug_list()
              blk_mq_flush_plug_list() block/blk-mq.c  ... this patch added call to blk_mq_plug_issue_direct()
                blk_mq_plug_issue_direct()
                  blk_mq_request_issue_directly()
                    __blk_mq_try_issue_directly()
                      __blk_mq_issue_directly()
                        q->mq_ops->queue_rq()
                          null_queue_rq() drivers/block/null_blk/main.c
                            might_sleep_if(flags & BLK_MQ_F_BLOCKING) include/linux/kernel.h
                              might_sleep()
                                __might_sleep() kernel/sched/core.c ... current->__state != TASK_RUNNING  (WARN_ONCE)
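
In essence, the pattern is (minimal sketch, not the literal code):

  set_current_state(TASK_UNINTERRUPTIBLE);
  /* io_schedule_prepare() flushes the plug before we sleep ... */
  blk_flush_plug_list(current->plug, true);
      /* ... which now reaches ->queue_rq() directly, and null_queue_rq()
         starts with: */
      might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
          /* current->__state != TASK_RUNNING here -> WARN */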

So far, I can't think of a good solution for this warning. Any idea?


[1]

[60501.340746] null_blk: module loaded
[60519.303106] ------------[ cut here ]------------
[60519.308485] do not call blocking ops when !TASK_RUNNING; state=2 set at [<000000005ba5e596>] __blkdev_direct_IO_simple+0x3f8/0x6f0
[60519.320943] WARNING: CPU: 2 PID: 8929 at kernel/sched/core.c:9486 __might_sleep+0x124/0x160
[60519.330001] Modules linked in: null_blk xfs dm_zoned xt_conntrack nf_nat_tftp nf_conntrack_tftp bridge stp llc nft_objref nf_conntrack_netbios_ns nf_conntrack_broadcast nft_fib_inet nft_fib_ipv4 nft_fib_ipv6 nft_fib nft_reject_inet nf_reject_ipv4 nf_reject_ipv6 nft_reject nft_ct nft_chain_nat nf_tables ebtable_nat ebtable_broute ip6table_nat ip6table_mangle ip6table_raw ip6table_security iptable_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 iptable_mangle iptable_raw iptable_security ip_set nfnetlink ebtable_filter rfkill ebtables target_core_user target_core_mod ip6table_filter ip6_tables iptable_filter sunrpc intel_rapl_msr intel_rapl_common x86_pkg_temp_thermal intel_powerclamp coretemp kvm_intel kvm irqbypass iTCO_wdt intel_pmc_bxt iTCO_vendor_support rapl intel_cstate intel_uncore pcspkr joydev ipmi_ssif i2c_i801 lpc_ich i2c_smbus ses enclosure mei_me mei ioatdma wmi acpi_ipmi ipmi_si ipmi_devintf ipmi_msghandler acpi_pad acpi_power_meter zram ip_tables ast
[60519.330166]  drm_vram_helper drm_kms_helper cec drm_ttm_helper crct10dif_pclmul ttm crc32_pclmul crc32c_intel drm ghash_clmulni_intel igb mpt3sas nvme dca nvme_core i2c_algo_bit raid_class scsi_transport_sas fuse [last unloaded: null_blk]
[60519.438458] CPU: 2 PID: 8929 Comm: mkfs.xfs Not tainted 5.15.0-rc6+ #11
[60519.445781] Hardware name: Supermicro Super Server/X10SRL-F, BIOS 2.0 12/17/2015
[60519.453893] RIP: 0010:__might_sleep+0x124/0x160
[60519.459139] Code: 48 8d bb 98 2c 00 00 48 89 fa 48 c1 ea 03 80 3c 02 00 75 31 48 8b 93 98 2c 00 00 44 89 f6 48 c7 c7 e0 f2 88 8a e8 04 eb f9 01 <0f> 0b e9 6d ff ff ff e8 60 d1 66 00 e9 1c ff ff ff e8 66 d1 66 00
[60519.478594] RSP: 0018:ffff8882707ef5a8 EFLAGS: 00010286
[60519.484533] RAX: 0000000000000000 RBX: ffff888125cbb280 RCX: 0000000000000000
[60519.492379] RDX: 0000000000000004 RSI: 0000000000000008 RDI: ffffed104e0fdeab
[60519.500216] RBP: ffffffffc16122c0 R08: 0000000000000001 R09: ffff8888114ad587
[60519.508052] R10: ffffed1102295ab0 R11: 0000000000000001 R12: 0000000000000618
[60519.515886] R13: 0000000000000000 R14: 0000000000000002 R15: ffff88813160a000
[60519.523721] FS:  00007fd79959b400(0000) GS:ffff888811480000(0000) knlGS:0000000000000000
[60519.532509] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[60519.538963] CR2: 000055bf4e3dc000 CR3: 000000029e904005 CR4: 00000000001706e0
[60519.546803] Call Trace:
[60519.549976]  null_queue_rq+0x3ee/0x6b0 [null_blk]
[60519.555407]  __blk_mq_try_issue_directly+0x433/0x680
[60519.561085]  ? __submit_bio+0x63a/0x780
[60519.565636]  ? __blk_mq_get_driver_tag+0x9a0/0x9a0
[60519.571144]  blk_mq_flush_plug_list+0x5f6/0xc40
[60519.576387]  ? iov_iter_get_pages_alloc+0xf50/0xf50
[60519.581980]  ? find_held_lock+0x2c/0x110
[60519.586618]  ? blk_mq_insert_requests+0x440/0x440
[60519.592044]  ? __blkdev_direct_IO_simple+0x3f8/0x6f0
[60519.597719]  blk_flush_plug_list+0x28f/0x410
[60519.602710]  ? blk_start_plug_nr_ios+0x270/0x270
[60519.608039]  ? __blkdev_direct_IO_simple+0x3f8/0x6f0
[60519.613713]  io_schedule_timeout+0xcc/0x150
[60519.618621]  __blkdev_direct_IO_simple+0x475/0x6f0
[60519.624126]  ? blkdev_llseek+0xc0/0xc0
[60519.628598]  ? blkdev_get_block+0xd0/0xd0
[60519.633320]  ? filemap_check_errors+0xe0/0xe0
[60519.638391]  ? find_held_lock+0x2c/0x110
[60519.643024]  ? lock_release+0x1d4/0x690
[60519.647574]  blkdev_direct_IO+0x9b2/0x1110
[60519.652389]  ? filemap_check_errors+0x56/0xe0
[60519.657455]  ? add_watch_to_object+0xa0/0x6e0
[60519.662524]  ? blkdev_bio_end_io+0x490/0x490
[60519.667518]  generic_file_direct_write+0x1a9/0x4a0
[60519.673026]  __generic_file_write_iter+0x1fa/0x480
[60519.678526]  ? lock_is_held_type+0xe0/0x110
[60519.683420]  blkdev_write_iter+0x319/0x5a0
[60519.688231]  ? blkdev_open+0x260/0x260
[60519.692690]  ? lock_downgrade+0x6b0/0x6b0
[60519.697412]  ? do_raw_spin_unlock+0x55/0x1f0
[60519.702392]  new_sync_write+0x359/0x5e0
[60519.706941]  ? new_sync_read+0x5d0/0x5d0
[60519.711582]  ? __cond_resched+0x15/0x30
[60519.716124]  ? inode_security+0x56/0xf0
[60519.720688]  vfs_write+0x5e4/0x8e0
[60519.724805]  __x64_sys_pwrite64+0x17c/0x1d0
[60519.729698]  ? vfs_write+0x8e0/0x8e0
[60519.733982]  ? syscall_enter_from_user_mode+0x21/0x70
[60519.739747]  do_syscall_64+0x3b/0x90
[60519.744037]  entry_SYSCALL_64_after_hwframe+0x44/0xae
[60519.749796] RIP: 0033:0x7fd7997c125a
[60519.754088] Code: d8 64 89 02 48 c7 c0 ff ff ff ff eb ba 0f 1f 00 f3 0f 1e fa 49 89 ca 64 8b 04 25 18 00 00 00 85 c0 75 15 b8 12 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 5e c3 0f 1f 44 00 00 48 83 ec 28 48 89 54 24
[60519.773543] RSP: 002b:00007ffe8d456718 EFLAGS: 00000246 ORIG_RAX: 0000000000000012
[60519.781816] RAX: ffffffffffffffda RBX: 00007ffe8d456e10 RCX: 00007fd7997c125a
[60519.789656] RDX: 0000000000020000 RSI: 000055bf4e3bd600 RDI: 0000000000000004
[60519.797494] RBP: 0000000000020000 R08: 000055bf4e3bd600 R09: 00007fd79975fa60
[60519.805333] R10: 00000003fffe0000 R11: 0000000000000246 R12: 000055bf4e3b9710
[60519.813172] R13: 0000000000000004 R14: 000055bf4e3bd600 R15: 0000000000001000
[60519.821028] irq event stamp: 20385
[60519.825139] hardirqs last  enabled at (20395): [<ffffffff883481e0>] __up_console_sem+0x60/0x70
[60519.834455] hardirqs last disabled at (20404): [<ffffffff883481c5>] __up_console_sem+0x45/0x70
[60519.843763] softirqs last  enabled at (20372): [<ffffffff881e6a7c>] __irq_exit_rcu+0x19c/0x200
[60519.853079] softirqs last disabled at (20367): [<ffffffff881e6a7c>] __irq_exit_rcu+0x19c/0x200
[60519.862389] ---[ end trace be9623465002e439 ]---

-- 
Best Regards,
Shin'ichiro Kawasaki


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] block: attempt direct issue of plug list
  2021-10-26  5:20   ` Shinichiro Kawasaki
@ 2021-10-26 14:42     ` Jens Axboe
  2021-10-27  2:16       ` Shinichiro Kawasaki
  0 siblings, 1 reply; 12+ messages in thread
From: Jens Axboe @ 2021-10-26 14:42 UTC (permalink / raw)
  To: Shinichiro Kawasaki; +Cc: linux-block, hch, Damien Le Moal

On 10/25/21 11:20 PM, Shinichiro Kawasaki wrote:
> On Oct 19, 2021 / 06:08, Jens Axboe wrote:
>> If we have just one queue type in the plug list, then we can extend our
>> direct issue to cover a full plug list as well.
>>
>> Signed-off-by: Jens Axboe <axboe@kernel.dk>
> 
> Hi Jens, I tried out the for-next branch and observed a WARNING, "do not call
> blocking ops when !TASK_RUNNING" [1]. Reverting this patch from the for-next
> branch makes the warning disappear. The warning was triggered when mkfs.xfs
> was run on a memory-backed null_blk device with the "none" scheduler. The
> commands below recreate it.
> 
> # modprobe null_blk nr_devices=0
> # mkdir /sys/kernel/config/nullb/nullb0
> # declare sysfs=/sys/kernel/config/nullb/nullb0
> # echo 1 > "${sysfs}"/memory_backed
> # echo 1 > "${sysfs}"/power
> # echo none > /sys/block/nullb0/queue/scheduler
> # mkfs.xfs /dev/nullb0
> 
> 
> Referring to the call stack printed, I walked through the function calls.
> In __blkdev_direct_IO_simple(), the task state is set to TASK_UNINTERRUPTIBLE.
> Further down, might_sleep_if() called from null_queue_rq() warns that the
> task state is not TASK_RUNNING. This patch adds the blk_mq_plug_issue_direct()
> call in blk_mq_flush_plug_list(), which links the call path from
> __blkdev_direct_IO_simple() to null_queue_rq().
> 
> __blkdev_direct_IO_simple() block/fops.c
>   set_current_state(TASK_UNINTERRUPTIBLE) ... current->__state = TASK_UNINTERRUPTIBLE
>   blk_io_schedule()
>     io_schedule_timeout() kernel/sched/core.c
>       io_schedule_prepare()
>         blk_schedule_flush_plug() include/linux/blkdev.h
>           blk_flush_plug_list() block/blk-core.c
>             blk_mq_flush_plug_list()
>               blk_mq_flush_plug_list() block/blk-mq.c  ... this patch added call to blk_mq_plug_issue_direct()
>                 blk_mq_plug_issue_direct()
>                   blk_mq_request_issue_directly()
>                     __blk_mq_try_issue_directly()
>                       __blk_mq_issue_directly()
>                         q->mq_ops->queue_rq()
>                           null_queue_rq() drivers/block/null_blk/main.c
>                             might_sleep_if(flags & BLK_MQ_F_BLOCKING) include/linux/kernel.h
>                               might_sleep()
>                                 __might_sleep() kernel/sched/core.c ... current->__state != TASK_RUNNING  (WARN_ONCE)
> 
> So far, I can't think of a good solution for this warning. Any idea?
> 
> 
> [1]
> 
> [60501.340746] null_blk: module loaded
> [60519.303106] ------------[ cut here ]------------
> [60519.308485] do not call blocking ops when !TASK_RUNNING; state=2 set at [<000000005ba5e596>] __blkdev_direct_IO_simple+0x3f8/0x6f0
> [60519.320943] WARNING: CPU: 2 PID: 8929 at kernel/sched/core.c:9486 __might_sleep+0x124/0x160
> [60519.330001] Modules linked in: null_blk xfs dm_zoned xt_conntrack nf_nat_tftp nf_conntrack_tftp bridge stp llc nft_objref nf_conntrack_netbios_ns nf_conntrack_broadcast nft_fib_inet nft_fib_ipv4 nft_fib_ipv6 nft_fib nft_reject_inet nf_reject_ipv4 nf_reject_ipv6 nft_reject nft_ct nft_chain_nat nf_tables ebtable_nat ebtable_broute ip6table_nat ip6table_mangle ip6table_raw ip6table_security iptable_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 iptable_mangle iptable_raw iptable_security ip_set nfnetlink ebtable_filter rfkill ebtables target_core_user target_core_mod ip6table_filter ip6_tables iptable_filter sunrpc intel_rapl_msr intel_rapl_common x86_pkg_temp_thermal intel_powerclamp coretemp kvm_intel kvm irqbypass iTCO_wdt intel_pmc_bxt iTCO_vendor_support rapl intel_cstate intel_uncore pcspkr joydev ipmi_ssif i2c_i801 lpc_ich i2c_smbus ses enclosure mei_me mei ioatdma wmi acpi_ipmi ipmi_si ipmi_devintf ipmi_msghandler acpi_pad acpi_power_meter zram ip_tables ast
> [60519.330166]  drm_vram_helper drm_kms_helper cec drm_ttm_helper crct10dif_pclmul ttm crc32_pclmul crc32c_intel drm ghash_clmulni_intel igb mpt3sas nvme dca nvme_core i2c_algo_bit raid_class scsi_transport_sas fuse [last unloaded: null_blk]
> [60519.438458] CPU: 2 PID: 8929 Comm: mkfs.xfs Not tainted 5.15.0-rc6+ #11
> [60519.445781] Hardware name: Supermicro Super Server/X10SRL-F, BIOS 2.0 12/17/2015
> [60519.453893] RIP: 0010:__might_sleep+0x124/0x160
> [60519.459139] Code: 48 8d bb 98 2c 00 00 48 89 fa 48 c1 ea 03 80 3c 02 00 75 31 48 8b 93 98 2c 00 00 44 89 f6 48 c7 c7 e0 f2 88 8a e8 04 eb f9 01 <0f> 0b e9 6d ff ff ff e8 60 d1 66 00 e9 1c ff ff ff e8 66 d1 66 00
> [60519.478594] RSP: 0018:ffff8882707ef5a8 EFLAGS: 00010286
> [60519.484533] RAX: 0000000000000000 RBX: ffff888125cbb280 RCX: 0000000000000000
> [60519.492379] RDX: 0000000000000004 RSI: 0000000000000008 RDI: ffffed104e0fdeab
> [60519.500216] RBP: ffffffffc16122c0 R08: 0000000000000001 R09: ffff8888114ad587
> [60519.508052] R10: ffffed1102295ab0 R11: 0000000000000001 R12: 0000000000000618
> [60519.515886] R13: 0000000000000000 R14: 0000000000000002 R15: ffff88813160a000
> [60519.523721] FS:  00007fd79959b400(0000) GS:ffff888811480000(0000) knlGS:0000000000000000
> [60519.532509] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [60519.538963] CR2: 000055bf4e3dc000 CR3: 000000029e904005 CR4: 00000000001706e0
> [60519.546803] Call Trace:
> [60519.549976]  null_queue_rq+0x3ee/0x6b0 [null_blk]
> [60519.555407]  __blk_mq_try_issue_directly+0x433/0x680
> [60519.561085]  ? __submit_bio+0x63a/0x780
> [60519.565636]  ? __blk_mq_get_driver_tag+0x9a0/0x9a0
> [60519.571144]  blk_mq_flush_plug_list+0x5f6/0xc40
> [60519.576387]  ? iov_iter_get_pages_alloc+0xf50/0xf50
> [60519.581980]  ? find_held_lock+0x2c/0x110
> [60519.586618]  ? blk_mq_insert_requests+0x440/0x440
> [60519.592044]  ? __blkdev_direct_IO_simple+0x3f8/0x6f0
> [60519.597719]  blk_flush_plug_list+0x28f/0x410
> [60519.602710]  ? blk_start_plug_nr_ios+0x270/0x270
> [60519.608039]  ? __blkdev_direct_IO_simple+0x3f8/0x6f0
> [60519.613713]  io_schedule_timeout+0xcc/0x150
> [60519.618621]  __blkdev_direct_IO_simple+0x475/0x6f0
> [60519.624126]  ? blkdev_llseek+0xc0/0xc0
> [60519.628598]  ? blkdev_get_block+0xd0/0xd0
> [60519.633320]  ? filemap_check_errors+0xe0/0xe0
> [60519.638391]  ? find_held_lock+0x2c/0x110
> [60519.643024]  ? lock_release+0x1d4/0x690
> [60519.647574]  blkdev_direct_IO+0x9b2/0x1110
> [60519.652389]  ? filemap_check_errors+0x56/0xe0
> [60519.657455]  ? add_watch_to_object+0xa0/0x6e0
> [60519.662524]  ? blkdev_bio_end_io+0x490/0x490
> [60519.667518]  generic_file_direct_write+0x1a9/0x4a0
> [60519.673026]  __generic_file_write_iter+0x1fa/0x480
> [60519.678526]  ? lock_is_held_type+0xe0/0x110
> [60519.683420]  blkdev_write_iter+0x319/0x5a0
> [60519.688231]  ? blkdev_open+0x260/0x260
> [60519.692690]  ? lock_downgrade+0x6b0/0x6b0
> [60519.697412]  ? do_raw_spin_unlock+0x55/0x1f0
> [60519.702392]  new_sync_write+0x359/0x5e0
> [60519.706941]  ? new_sync_read+0x5d0/0x5d0
> [60519.711582]  ? __cond_resched+0x15/0x30
> [60519.716124]  ? inode_security+0x56/0xf0
> [60519.720688]  vfs_write+0x5e4/0x8e0
> [60519.724805]  __x64_sys_pwrite64+0x17c/0x1d0
> [60519.729698]  ? vfs_write+0x8e0/0x8e0
> [60519.733982]  ? syscall_enter_from_user_mode+0x21/0x70
> [60519.739747]  do_syscall_64+0x3b/0x90
> [60519.744037]  entry_SYSCALL_64_after_hwframe+0x44/0xae
> [60519.749796] RIP: 0033:0x7fd7997c125a
> [60519.754088] Code: d8 64 89 02 48 c7 c0 ff ff ff ff eb ba 0f 1f 00 f3 0f 1e fa 49 89 ca 64 8b 04 25 18 00 00 00 85 c0 75 15 b8 12 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 5e c3 0f 1f 44 00 00 48 83 ec 28 48 89 54 24
> [60519.773543] RSP: 002b:00007ffe8d456718 EFLAGS: 00000246 ORIG_RAX: 0000000000000012
> [60519.781816] RAX: ffffffffffffffda RBX: 00007ffe8d456e10 RCX: 00007fd7997c125a
> [60519.789656] RDX: 0000000000020000 RSI: 000055bf4e3bd600 RDI: 0000000000000004
> [60519.797494] RBP: 0000000000020000 R08: 000055bf4e3bd600 R09: 00007fd79975fa60
> [60519.805333] R10: 00000003fffe0000 R11: 0000000000000246 R12: 000055bf4e3b9710
> [60519.813172] R13: 0000000000000004 R14: 000055bf4e3bd600 R15: 0000000000001000
> [60519.821028] irq event stamp: 20385
> [60519.825139] hardirqs last  enabled at (20395): [<ffffffff883481e0>] __up_console_sem+0x60/0x70
> [60519.834455] hardirqs last disabled at (20404): [<ffffffff883481c5>] __up_console_sem+0x45/0x70
> [60519.843763] softirqs last  enabled at (20372): [<ffffffff881e6a7c>] __irq_exit_rcu+0x19c/0x200
> [60519.853079] softirqs last disabled at (20367): [<ffffffff881e6a7c>] __irq_exit_rcu+0x19c/0x200
> [60519.862389] ---[ end trace be9623465002e439 ]---

This one should fix it:

https://git.kernel.dk/cgit/linux-block/commit/?h=for-5.16/block&id=ff1552232b3612edff43a95746a4e78e231ef3d4
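
For reference, the fix roughly amounts to also gating the direct issue on
!from_schedule, since ->queue_rq() may sleep and that is not allowed once
the task has already set its state to go to sleep:

	if (!plug->multiple_queues && !plug->has_elevator && !from_schedule)
		blk_mq_plug_issue_direct(plug, false);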

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] block: attempt direct issue of plug list
  2021-10-19 12:08 ` [PATCH 2/2] block: attempt direct issue of plug list Jens Axboe
  2021-10-19 13:36   ` Christoph Hellwig
  2021-10-26  5:20   ` Shinichiro Kawasaki
@ 2021-10-27  1:13   ` Guenter Roeck
  2021-10-27  2:36     ` Jens Axboe
  2 siblings, 1 reply; 12+ messages in thread
From: Guenter Roeck @ 2021-10-27  1:13 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, hch

Hi,

On Tue, Oct 19, 2021 at 06:08:34AM -0600, Jens Axboe wrote:
> If we have just one queue type in the plug list, then we can extend our
> direct issue to cover a full plug list as well.
> 
> Signed-off-by: Jens Axboe <axboe@kernel.dk>
> Reviewed-by: Christoph Hellwig <hch@lst.de>

This patch results in a number of warning tracebacks in linux-next.
Reverting it fixes the problem. Example tracebacks and the bisect result
are below.

Guenter

---
boot from mmc, ext2:

[   10.868421] ------------[ cut here ]------------
[   10.868795] WARNING: CPU: 0 PID: 1 at kernel/sched/core.c:9477 __might_sleep+0x70/0x98
[   10.869013] do not call blocking ops when !TASK_RUNNING; state=2 set at [<(ptrval)>] prepare_to_wait+0x6c/0xb0
[   10.869350] CPU: 0 PID: 1 Comm: swapper Not tainted 5.15.0-rc7-next-20211026-sx1 #1
[   10.869547] Hardware name: OMAP310 based Siemens SX1
[   10.869779] [<c000dc00>] (unwind_backtrace) from [<c000cc54>] (show_stack+0x10/0x18)
[   10.870010] [<c000cc54>] (show_stack) from [<c0430830>] (dump_stack+0x20/0x2c)
[   10.870197] [<c0430830>] (dump_stack) from [<c0018bf8>] (__warn+0xac/0xec)
[   10.870372] [<c0018bf8>] (__warn) from [<c0424218>] (warn_slowpath_fmt+0x68/0x80)
[   10.870556] [<c0424218>] (warn_slowpath_fmt) from [<c003ba2c>] (__might_sleep+0x70/0x98)
[   10.870751] [<c003ba2c>] (__might_sleep) from [<c03f4fb4>] (__mmc_claim_host+0x54/0x1d0)
[   10.870947] [<c03f4fb4>] (__mmc_claim_host) from [<c04053c8>] (mmc_mq_queue_rq+0x12c/0x214)
[   10.871148] [<c04053c8>] (mmc_mq_queue_rq) from [<c02b35c8>] (__blk_mq_try_issue_directly+0xe8/0x134)
[   10.871368] [<c02b35c8>] (__blk_mq_try_issue_directly) from [<c02b45c0>] (blk_mq_request_issue_directly+0x30/0x50)
[   10.871609] [<c02b45c0>] (blk_mq_request_issue_directly) from [<c02b4750>] (blk_mq_flush_plug_list+0x170/0x1f4)
[   10.871842] [<c02b4750>] (blk_mq_flush_plug_list) from [<c02a9ee8>] (blk_flush_plug+0x50/0xec)
[   10.872047] [<c02a9ee8>] (blk_flush_plug) from [<c003b410>] (io_schedule_prepare+0x40/0x50)
[   10.872244] [<c003b410>] (io_schedule_prepare) from [<c04324d8>] (io_schedule+0xc/0x24)
[   10.872436] [<c04324d8>] (io_schedule) from [<c0432814>] (bit_wait_io+0xc/0x34)
[   10.872616] [<c0432814>] (bit_wait_io) from [<c0432548>] (__wait_on_bit+0x58/0x98)
[   10.872801] [<c0432548>] (__wait_on_bit) from [<c04325fc>] (out_of_line_wait_on_bit+0x74/0x84)
[   10.873004] [<c04325fc>] (out_of_line_wait_on_bit) from [<c010dfdc>] (__wait_on_buffer+0x34/0x44)
[   10.873215] [<c010dfdc>] (__wait_on_buffer) from [<c018b688>] (wait_on_buffer+0x24/0x34)
[   10.873411] [<c018b688>] (wait_on_buffer) from [<c018d6d4>] (ext4_read_bh+0x58/0x68)
[   10.873598] [<c018d6d4>] (ext4_read_bh) from [<c015aad0>] (ext4_get_branch+0x9c/0x124)
[   10.873789] [<c015aad0>] (ext4_get_branch) from [<c015ada8>] (ext4_ind_map_blocks+0x158/0xa0c)
[   10.873991] [<c015ada8>] (ext4_ind_map_blocks) from [<c0161f74>] (ext4_map_blocks+0x320/0x558)
[   10.874197] [<c0161f74>] (ext4_map_blocks) from [<c017ec78>] (ext4_mpage_readpages+0x31c/0x6a8)
[   10.874403] [<c017ec78>] (ext4_mpage_readpages) from [<c0160044>] (ext4_readahead+0x24/0x2c)
[   10.874602] [<c0160044>] (ext4_readahead) from [<c009ba18>] (read_pages+0x48/0x118)
[   10.874793] [<c009ba18>] (read_pages) from [<c009bda0>] (page_cache_ra_unbounded+0xfc/0x1e8)
[   10.874994] [<c009bda0>] (page_cache_ra_unbounded) from [<c0094a3c>] (filemap_read+0x19c/0x81c)
[   10.875202] [<c0094a3c>] (filemap_read) from [<c0154524>] (ext4_file_read_iter+0x9c/0xf4)
[   10.875400] [<c0154524>] (ext4_file_read_iter) from [<c00da450>] (__kernel_read+0xc4/0x14c)
[   10.875604] [<c00da450>] (__kernel_read) from [<c00e066c>] (bprm_execve+0x204/0x4bc)
[   10.875795] [<c00e066c>] (bprm_execve) from [<c00e1370>] (kernel_execve+0xe4/0x114)
[   10.875982] [<c00e1370>] (kernel_execve) from [<c0423a6c>] (run_init_process+0x60/0x98)
[   10.876175] [<c0423a6c>] (run_init_process) from [<c0423ab0>] (try_to_run_init_process+0xc/0x3c)
[   10.876383] [<c0423ab0>] (try_to_run_init_process) from [<c04314f0>] (kernel_init+0x90/0x108)
[   10.876585] [<c04314f0>] (kernel_init) from [<c00084d0>] (ret_from_fork+0x14/0x24)
[   10.876797] Exception stack(0xc103bfb0 to 0xc103bff8)
[   10.877217] bfa0:                                     00000000 00000000 00000000 00000000
[   10.877439] bfc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
[   10.877634] bfe0: 00000000 00000000 00000000 00000000 00000013 00000000
[   10.877830] irq event stamp: 65423
[   10.877943] hardirqs last  enabled at (65431): [<c004ea98>] __up_console_sem+0x38/0x58
[   10.878178] hardirqs last disabled at (65438): [<c004ea80>] __up_console_sem+0x20/0x58
[   10.878372] softirqs last  enabled at (65008): [<c0009734>] __do_softirq+0x32c/0x404
[   10.878579] softirqs last disabled at (64993): [<c001cb00>] irq_exit+0x120/0x15c
[   10.878788] ---[ end trace 3925c0327f8d873a ]---

---
boot from flash, squashfs:

[   10.742470] ------------[ cut here ]------------
[   10.742812] WARNING: CPU: 0 PID: 1 at kernel/sched/core.c:9477 __might_sleep+0x70/0x98
[   10.743021] do not call blocking ops when !TASK_RUNNING; state=2 set at [<(ptrval)>] __wait_for_common+0xa4/0x158
[   10.743365] CPU: 0 PID: 1 Comm: swapper Not tainted 5.15.0-rc7-next-20211026-sx1 #1
[   10.743593] Hardware name: OMAP310 based Siemens SX1
[   10.743815] [<c000dc00>] (unwind_backtrace) from [<c000cc54>] (show_stack+0x10/0x18)
[   10.744036] [<c000cc54>] (show_stack) from [<c0430830>] (dump_stack+0x20/0x2c)
[   10.744215] [<c0430830>] (dump_stack) from [<c0018bf8>] (__warn+0xac/0xec)
[   10.744383] [<c0018bf8>] (__warn) from [<c0424218>] (warn_slowpath_fmt+0x68/0x80)
[   10.744709] [<c0424218>] (warn_slowpath_fmt) from [<c003ba2c>] (__might_sleep+0x70/0x98)
[   10.744922] [<c003ba2c>] (__might_sleep) from [<c0433654>] (__mutex_lock+0x24/0x264)
[   10.745106] [<c0433654>] (__mutex_lock) from [<c0433968>] (mutex_lock_nested+0x18/0x24)
[   10.745306] [<c0433968>] (mutex_lock_nested) from [<c03e46f0>] (mtd_queue_rq+0xcc/0x3f0)
[   10.745497] [<c03e46f0>] (mtd_queue_rq) from [<c02b35c8>] (__blk_mq_try_issue_directly+0xe8/0x134)
[   10.745702] [<c02b35c8>] (__blk_mq_try_issue_directly) from [<c02b45c0>] (blk_mq_request_issue_directly+0x30/0x50)
[   10.745928] [<c02b45c0>] (blk_mq_request_issue_directly) from [<c02b4750>] (blk_mq_flush_plug_list+0x170/0x1f4)
[   10.746148] [<c02b4750>] (blk_mq_flush_plug_list) from [<c02a9ee8>] (blk_flush_plug+0x50/0xec)
[   10.746345] [<c02a9ee8>] (blk_flush_plug) from [<c003b410>] (io_schedule_prepare+0x40/0x50)
[   10.746535] [<c003b410>] (io_schedule_prepare) from [<c04324a4>] (io_schedule_timeout+0x10/0x38)
[   10.746732] [<c04324a4>] (io_schedule_timeout) from [<c04329cc>] (__wait_for_common+0xd8/0x158)
[   10.746928] [<c04329cc>] (__wait_for_common) from [<c02a5068>] (submit_bio_wait+0x4c/0x6c)
[   10.747119] [<c02a5068>] (submit_bio_wait) from [<c01b22f4>] (squashfs_bio_read+0x154/0x20c)
[   10.747314] [<c01b22f4>] (squashfs_bio_read) from [<c01b2688>] (squashfs_read_data+0x2dc/0x3d8)
[   10.747510] [<c01b2688>] (squashfs_read_data) from [<c01b2a3c>] (squashfs_cache_get+0x23c/0x2e8)
[   10.747707] [<c01b2a3c>] (squashfs_cache_get) from [<c01b5ba4>] (squashfs_readpage_block+0x28/0x80)
[   10.747909] [<c01b5ba4>] (squashfs_readpage_block) from [<c01b3f74>] (squashfs_readpage+0x580/0x5f4)
[   10.748112] [<c01b3f74>] (squashfs_readpage) from [<c009baa4>] (read_pages+0xd4/0x118)
[   10.748297] [<c009baa4>] (read_pages) from [<c009bda0>] (page_cache_ra_unbounded+0xfc/0x1e8)
[   10.748490] [<c009bda0>] (page_cache_ra_unbounded) from [<c0094a3c>] (filemap_read+0x19c/0x81c)
[   10.748688] [<c0094a3c>] (filemap_read) from [<c00da450>] (__kernel_read+0xc4/0x14c)
[   10.748870] [<c00da450>] (__kernel_read) from [<c00e066c>] (bprm_execve+0x204/0x4bc)
[   10.749051] [<c00e066c>] (bprm_execve) from [<c00e1370>] (kernel_execve+0xe4/0x114)
[   10.749232] [<c00e1370>] (kernel_execve) from [<c0423a6c>] (run_init_process+0x60/0x98)
[   10.749417] [<c0423a6c>] (run_init_process) from [<c0423ab0>] (try_to_run_init_process+0xc/0x3c)
[   10.749615] [<c0423ab0>] (try_to_run_init_process) from [<c04314f0>] (kernel_init+0x90/0x108)
[   10.749808] [<c04314f0>] (kernel_init) from [<c00084d0>] (ret_from_fork+0x14/0x24)
[   10.750013] Exception stack(0xc103bfb0 to 0xc103bff8)
[   10.750280] bfa0:                                     00000000 00000000 00000000 00000000
[   10.750482] bfc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
[   10.750667] bfe0: 00000000 00000000 00000000 00000000 00000013 00000000
[   10.750851] irq event stamp: 67803
[   10.750958] hardirqs last  enabled at (67811): [<c004ea98>] __up_console_sem+0x38/0x58
[   10.751179] hardirqs last disabled at (67818): [<c004ea80>] __up_console_sem+0x20/0x58
[   10.751364] softirqs last  enabled at (67552): [<c0009734>] __do_softirq+0x32c/0x404
[   10.751559] softirqs last disabled at (67547): [<c001cb00>] irq_exit+0x120/0x15c
[   10.751753] ---[ end trace e6cf94fcaae1cea0 ]---

---
boot from flash, ext2:

[   12.328280] WARNING: CPU: 0 PID: 1 at kernel/sched/core.c:9477 __might_sleep+0x98/0xac
[   12.328504] do not call blocking ops when !TASK_RUNNING; state=2 set at [<80154954>] prepare_to_wait+0x4c/0x12c
[   12.328787] CPU: 0 PID: 1 Comm: swapper Not tainted 5.15.0-rc7-next-20211026 #1
[   12.328942] Hardware name: Generic DT based system
[   12.329134] Backtrace:
[   12.329337] [<80b7d33c>] (dump_backtrace) from [<80b7d710>] (show_stack+0x20/0x24)
[   12.329609]  r7:00002505 r6:00000009 r5:8014c45c r4:80e397cc
[   12.329752] [<80b7d6f0>] (show_stack) from [<80b9705c>] (dump_stack+0x28/0x30)
[   12.329895] [<80b97034>] (dump_stack) from [<80115040>] (__warn+0xe4/0x164)
[   12.330035]  r5:8014c45c r4:80de4198
[   12.330120] [<80114f5c>] (__warn) from [<80b7dda0>] (warn_slowpath_fmt+0xa0/0xe4)
[   12.330264]  r7:8014c45c r6:00002505 r5:80de4198 r4:80de444c
[   12.330371] [<80b7dd04>] (warn_slowpath_fmt) from [<8014c45c>] (__might_sleep+0x98/0xac)
[   12.330523]  r8:00000001 r7:8287827c r6:00000001 r5:00000248 r4:80de4914
[   12.330641] [<8014c3c4>] (__might_sleep) from [<80b9d28c>] (__mutex_lock+0x48/0x6f4)
[   12.330792]  r6:00000000 r5:00000000 r4:82878210
[   12.330889] [<80b9d244>] (__mutex_lock) from [<80b9da60>] (mutex_lock_nested+0x2c/0x34)
[   12.331049]  r10:82878200 r9:82382920 r8:00000001 r7:8287827c r6:82878210 r5:828a5038
[   12.331185]  r4:828a5000
[   12.331259] [<80b9da34>] (mutex_lock_nested) from [<807ca3dc>] (mtd_queue_rq+0x1b4/0x560)
[   12.331411] [<807ca228>] (mtd_queue_rq) from [<805e04d4>] (__blk_mq_try_issue_directly+0x178/0x1c4)
[   12.331577]  r10:8197fbbc r9:82382920 r8:00000001 r7:00000001 r6:8287ce00 r5:00000000
[   12.331708]  r4:828a5000
[   12.331781] [<805e035c>] (__blk_mq_try_issue_directly) from [<805e240c>] (blk_mq_flush_plug_list+0x338/0x5c0)
[   12.331957]  r9:8197e000 r8:8197f77c r7:8287ce00 r6:8197fbac r5:00000001 r4:828a5000
[   12.332087] [<805e20d4>] (blk_mq_flush_plug_list) from [<805d42a8>] (blk_flush_plug+0xf8/0x144)
[   12.332249]  r10:8197fbbc r9:8197fbac r8:80b9ba20 r7:8197f860 r6:00000001 r5:8197f854
[   12.332379]  r4:00000000
[   12.332452] [<805d41b0>] (blk_flush_plug) from [<80b9b584>] (io_schedule+0x50/0x78)
[   12.332602]  r10:00000008 r9:00000002 r8:80b9ba20 r7:8197f860 r6:81004254 r5:8197f854
[   12.332731]  r4:00000000
[   12.332805] [<80b9b534>] (io_schedule) from [<80b9ba3c>] (bit_wait_io+0x1c/0x80)
[   12.332946]  r5:8197f854 r4:00000002
[   12.333031] [<80b9ba20>] (bit_wait_io) from [<80b9b60c>] (__wait_on_bit+0x60/0xac)
[   12.333172]  r5:8197f854 r4:8197e000
[   12.333256] [<80b9b5ac>] (__wait_on_bit) from [<80b9b6f8>] (out_of_line_wait_on_bit+0xa0/0xd4)
[   12.333414]  r9:8277d3b0 r8:801550cc r7:61c88647 r6:8197f86c r5:81002020 r4:8197e000
[   12.333542] [<80b9b658>] (out_of_line_wait_on_bit) from [<8031b554>] (__wait_on_buffer+0x44/0x50)
[   12.333705]  r8:82bbd000 r7:40000113 r6:60000113 r5:00000000 r4:827808c8
[   12.333823] [<8031b510>] (__wait_on_buffer) from [<803ed810>] (ext4_read_bh+0x13c/0x190)
[   12.333975]  r5:00000000 r4:827808c8
[   12.334061] [<803ed6d4>] (ext4_read_bh) from [<803a0f00>] (ext4_get_branch+0xcc/0x168)
[   12.334211]  r7:8197f9b0 r6:00000001 r5:827808c8 r4:8197f9e4
[   12.334318] [<803a0e34>] (ext4_get_branch) from [<803a12d4>] (ext4_ind_map_blocks+0x1e4/0xd1c)
[   12.334477]  r10:00000000 r9:00000000 r8:00000004 r7:00000000 r6:8277d3b0 r5:0000000c
[   12.334608]  r4:00000000
[   12.334681] [<803a10f0>] (ext4_ind_map_blocks) from [<803a9d8c>] (ext4_map_blocks+0x114/0x668)
[   12.334849]  r10:8277d360 r9:00000000 r8:00000004 r7:00000000 r6:8277d3b0 r5:0000000c
[   12.334980]  r4:8197fb08
[   12.335053] [<803a9c78>] (ext4_map_blocks) from [<803d39bc>] (ext4_mpage_readpages+0x564/0x858)
[   12.335215]  r10:00000004 r9:00000000 r8:00000004 r7:00000004 r6:00000000 r5:0000000c
[   12.335345]  r4:00000000
[   12.335419] [<803d3458>] (ext4_mpage_readpages) from [<803a7bd0>] (ext4_readahead+0x44/0x48)
[   12.335579]  r10:8772af00 r9:01112cca r8:803a7b8c r7:80c18298 r6:00000000 r5:8197fc14
[   12.335710]  r4:8197fcd4
[   12.335784] [<803a7b8c>] (ext4_readahead) from [<80274b68>] (read_pages+0x94/0x27c)
[   12.335927] [<80274ad4>] (read_pages) from [<8027512c>] (page_cache_ra_unbounded+0x204/0x2a0)
[   12.336083]  r10:8772af00 r9:01112cca r8:8277d4f8 r7:8197fcd4 r6:00000003 r5:00000004
[   12.336213]  r4:8772af00
[   12.336287] [<80274f28>] (page_cache_ra_unbounded) from [<802754a0>] (ondemand_readahead+0x2d8/0x380)
[   12.336450]  r10:00000000 r9:00000000 r8:00000020 r7:00000000 r6:00000003 r5:82eb00f8
[   12.336580]  r4:8197fcd4
[   12.336654] [<802751c8>] (ondemand_readahead) from [<8027573c>] (page_cache_sync_ra+0x6c/0x70)
[   12.336811]  r10:00000000 r9:8197fe60 r8:00000000 r7:00000001 r6:8277d4f8 r5:8197fd54
[   12.336941]  r4:8197fe60
[   12.337014] [<802756d0>] (page_cache_sync_ra) from [<80267e04>] (filemap_get_pages+0x114/0x818)
[   12.337167] [<80267cf0>] (filemap_get_pages) from [<8026aad8>] (filemap_read+0xf0/0x3f0)
[   12.337319]  r10:8197fe48 r9:8197fe60 r8:00000000 r7:82eb0020 r6:8197fe48 r5:00000000
[   12.337450]  r4:8277d3b0
[   12.337523] [<8026a9e8>] (filemap_read) from [<8026aee0>] (generic_file_read_iter+0x108/0x164)
[   12.337680]  r10:82bb385c r9:00000000 r8:00000000 r7:00000100 r6:8197fe48 r5:00000000
[   12.337998]  r4:8197fe60
[   12.338092] [<8026add8>] (generic_file_read_iter) from [<80399048>] (ext4_file_read_iter+0x5c/0x140)
[   12.338266]  r10:82bb385c r9:00000000 r8:00000000 r7:8197fef8 r6:8277d3b0 r5:8197fe60
[   12.338396]  r4:8197fe48
[   12.338470] [<80398fec>] (ext4_file_read_iter) from [<802ce8a8>] (__kernel_read+0x130/0x2e0)
[   12.338626]  r7:8197fef8 r6:00000100 r5:00000000 r4:82eb0020
[   12.338731] [<802ce778>] (__kernel_read) from [<802ceaa0>] (kernel_read+0x48/0x8c)
[   12.338884]  r9:00000000 r8:8100ca3c r7:819e21e0 r6:8106cc08 r5:8106cbe8 r4:82bb3800
[   12.339012] [<802cea58>] (kernel_read) from [<802d7cd0>] (bprm_execve+0x2a4/0x61c)
[   12.339156]  r5:8106cbe8 r4:82bb3800
[   12.339240] [<802d7a2c>] (bprm_execve) from [<802d8f70>] (kernel_execve+0x120/0x180)
[   12.339387]  r10:00000000 r9:00000000 r8:8100ca3c r7:8100c9b0 r6:819e21e0 r5:82bb3800
[   12.339516]  r4:00000000
[   12.339589] [<802d8e50>] (kernel_execve) from [<80b7d0e4>] (run_init_process+0xb4/0xe0)
[   12.339742]  r9:00000000 r8:00000000 r7:80eb1744 r6:810dc540 r5:8100ca48 r4:80dde074
[   12.339871] [<80b7d030>] (run_init_process) from [<80b7d12c>] (try_to_run_init_process+0x1c/0x48)
[   12.340028]  r7:00000000 r6:00000000 r5:80dde074 r4:810ff000
[   12.340135] [<80b7d110>] (try_to_run_init_process) from [<80b97f20>] (kernel_init+0xcc/0x140)
[   12.340287]  r5:8100c9b0 r4:810ff000
[   12.340371] [<80b97e54>] (kernel_init) from [<801000f8>] (ret_from_fork+0x14/0x3c)
[   12.340555] Exception stack(0x8197ffb0 to 0x8197fff8)
[   12.340720] ffa0:                                     00000000 00000000 00000000 00000000
[   12.340873] ffc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
[   12.341015] ffe0: 00000000 00000000 00000000 00000000 00000013 00000000
[   12.341154]  r5:80b97e54 r4:00000000
[   12.341297] irq event stamp: 338305
[   12.341394] hardirqs last  enabled at (338313): [<8016b190>] __up_console_sem+0x70/0x90
[   12.341546] hardirqs last disabled at (338320): [<8016b17c>] __up_console_sem+0x5c/0x90
[   12.341693] softirqs last  enabled at (337228): [<801015ac>] __do_softirq+0x3a4/0x4bc
[   12.341849] softirqs last disabled at (337219): [<8011abf8>] __irq_exit_rcu+0x154/0x198
[   12.342017] ---[ end trace 90c4b572201178ca ]---

---
bisect:

# bad: [2376e5fe91bcad74b997d2cc0535abff79ec73c5] Add linux-next specific files for 20211026
# good: [3906fe9bb7f1a2c8667ae54e967dc8690824f4ea] Linux 5.15-rc7
git bisect start 'HEAD' 'v5.15-rc7'
# good: [18298270669947b661fe47bf7ec755a6d254c464] Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
git bisect good 18298270669947b661fe47bf7ec755a6d254c464
# bad: [2701cbf5818d2e249bc890297b6ccb4665bee93d] Merge branch 'auto-latest' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git
git bisect bad 2701cbf5818d2e249bc890297b6ccb4665bee93d
# good: [3462546aa74a9901a8955c3b2c3e55d736353360] Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
git bisect good 3462546aa74a9901a8955c3b2c3e55d736353360
# bad: [0cb3815f5831d5d81e742a6ec9b8c5a8a24e3a3b] Merge branch 'for-next' of git://git.kernel.dk/linux-block.git
git bisect bad 0cb3815f5831d5d81e742a6ec9b8c5a8a24e3a3b
# bad: [36413f42eaf5ccaa42f9a79756129abc34cd27c9] Merge branch 'for-5.16/drivers' into for-next
git bisect bad 36413f42eaf5ccaa42f9a79756129abc34cd27c9
# good: [d28e4dff085c5a87025c9a0a85fb798bd8e9ca17] block: ataflop: more blk-mq refactoring fixes
git bisect good d28e4dff085c5a87025c9a0a85fb798bd8e9ca17
# good: [88eb469d0dcbd2206f8f16e76a2ab7a32475fbed] Merge branch 'for-5.16/bdev-size' into for-next
git bisect good 88eb469d0dcbd2206f8f16e76a2ab7a32475fbed
# bad: [49389040df8f2ac0e03412d6dcad0ef322cbbc1b] Merge branch 'for-5.16/block' into for-next
git bisect bad 49389040df8f2ac0e03412d6dcad0ef322cbbc1b
# bad: [ce807b324fd4e02ecd8d5e49fab16baad4af9575] Merge branch 'for-5.16/io_uring' into for-next
git bisect bad ce807b324fd4e02ecd8d5e49fab16baad4af9575
# good: [06114f3294e91818408c0008446ccf41d67cd63e] Merge branch 'for-5.16/bdev-size' into for-next
git bisect good 06114f3294e91818408c0008446ccf41d67cd63e
# bad: [44b2b16cb77838b9596e6551088b1b18657398c2] Merge branch 'for-5.16/block' into for-next
git bisect bad 44b2b16cb77838b9596e6551088b1b18657398c2
# bad: [59d62b58f1203a6b59a3e51244dee91ea80340cd] Merge branch 'for-5.16/block' into for-next
git bisect bad 59d62b58f1203a6b59a3e51244dee91ea80340cd
# bad: [dc5fc361d891e089dfd9c0a975dc78041036b906] block: attempt direct issue of plug list
git bisect bad dc5fc361d891e089dfd9c0a975dc78041036b906
# good: [bc490f81731e181b07b8d7577425c06ae91692c8] block: change plugging to use a singly linked list
git bisect good bc490f81731e181b07b8d7577425c06ae91692c8
# first bad commit: [dc5fc361d891e089dfd9c0a975dc78041036b906] block: attempt direct issue of plug list

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] block: attempt direct issue of plug list
  2021-10-26 14:42     ` Jens Axboe
@ 2021-10-27  2:16       ` Shinichiro Kawasaki
  0 siblings, 0 replies; 12+ messages in thread
From: Shinichiro Kawasaki @ 2021-10-27  2:16 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, hch, Damien Le Moal

On Oct 26, 2021 / 08:42, Jens Axboe wrote:
> On 10/25/21 11:20 PM, Shinichiro Kawasaki wrote:
> > On Oct 19, 2021 / 06:08, Jens Axboe wrote:
> >> If we have just one queue type in the plug list, then we can extend our
> >> direct issue to cover a full plug list as well.
> >>
> >> Signed-off-by: Jens Axboe <axboe@kernel.dk>
> > 
> > Hi Jens, I tried out the for-next branch and observed a WARNING, "do not call
> > blocking ops when !TASK_RUNNING" [1]. Reverting this patch from the for-next
> > branch makes the warning disappear. The warning was triggered when mkfs.xfs
> > was run on a memory-backed null_blk device with the "none" scheduler.

(snip)

> 
> This one should fix it:
> 
> https://git.kernel.dk/cgit/linux-block/commit/?h=for-5.16/block&id=ff1552232b3612edff43a95746a4e78e231ef3d4

I confirmed that the patch fixes the warning. Good. Thank you, Ming and Jens.

-- 
Best Regards,
Shin'ichiro Kawasaki

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] block: attempt direct issue of plug list
  2021-10-27  1:13   ` Guenter Roeck
@ 2021-10-27  2:36     ` Jens Axboe
  0 siblings, 0 replies; 12+ messages in thread
From: Jens Axboe @ 2021-10-27  2:36 UTC (permalink / raw)
  To: Guenter Roeck; +Cc: linux-block, hch

On 10/26/21 7:13 PM, Guenter Roeck wrote:
> Hi,
> 
> On Tue, Oct 19, 2021 at 06:08:34AM -0600, Jens Axboe wrote:
>> If we have just one queue type in the plug list, then we can extend our
>> direct issue to cover a full plug list as well.
>>
>> Signed-off-by: Jens Axboe <axboe@kernel.dk>
>> Reviewed-by: Christoph Hellwig <hch@lst.de>
> 
> This patch results in a number of warning tracebacks in linux-next.
> Reverting it fixes the problem. Example tracebacks and the bisect result
> are below.

See the other replies in this thread; it's fixed in the current tree.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 12+ messages in thread

Thread overview: 12+ messages
2021-10-19 12:08 [PATCHSET v2] Improve plugging Jens Axboe
2021-10-19 12:08 ` [PATCH 1/2] block: change plugging to use a singly linked list Jens Axboe
2021-10-19 13:34   ` Christoph Hellwig
2021-10-19 13:45     ` Jens Axboe
2021-10-19 12:08 ` [PATCH 2/2] block: attempt direct issue of plug list Jens Axboe
2021-10-19 13:36   ` Christoph Hellwig
2021-10-19 13:45     ` Jens Axboe
2021-10-26  5:20   ` Shinichiro Kawasaki
2021-10-26 14:42     ` Jens Axboe
2021-10-27  2:16       ` Shinichiro Kawasaki
2021-10-27  1:13   ` Guenter Roeck
2021-10-27  2:36     ` Jens Axboe
