From: Ming Lei <ming.lei@redhat.com>
To: Christoph Hellwig <hch@lst.de>
Cc: Jens Axboe <axboe@kernel.dk>, Sagi Grimberg <sagi@grimberg.me>,
"Paul E. McKenney" <paulmck@kernel.org>,
linux-nvme@lists.infradead.org, linux-block@vger.kernel.org,
Chao Leng <lengchao@huawei.com>, Keith Busch <kbusch@kernel.org>,
Ming Lin <mlin@kernel.org>
Subject: Re: [PATCH v5 1/2] blk-mq: add tagset quiesce interface
Date: Tue, 28 Jul 2020 17:16:33 +0800 [thread overview]
Message-ID: <20200728091633.GB1326626@T590> (raw)
In-Reply-To: <20200728071859.GA21629@lst.de>
On Tue, Jul 28, 2020 at 09:18:59AM +0200, Christoph Hellwig wrote:
> I like the tagset based interface. But the idea of doing a per-hctx
> allocation and wait doesn't seem very scalable.
>
> Paul, do you have any good idea for an interface that waits on
> multiple srcu heads? As far as I can tell we could just have a single
> global completion and counter, and each call_srcu would just
> decrement it and then the final one would do the wakeup. It would just
> be great to figure out a way to keep the struct rcu_synchronize and
> counter on stack to avoid an allocation.
>
> But if we can't do with an on-stack object I'd much rather just embed
> the rcu_head in the hw_ctx.
I think we can do that, please see the following patch which is against Sagi's V5:
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c3856377b961..fc46e77460f1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -27,6 +27,7 @@
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
+#include <linux/rcupdate_wait.h>
#include <trace/events/block.h>
@@ -209,6 +210,50 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
+struct blk_mq_srcu_sync {
+ struct rcu_synchronize srcu_sync;
+ atomic_t count;
+};
+
+static void blk_mq_srcu_sync_init(struct blk_mq_srcu_sync *sync, int count)
+{
+ init_completion(&sync->srcu_sync.completion);
+ init_rcu_head(&sync->srcu_sync.head);
+
+ atomic_set(&sync->count, count);
+}
+
+static void blk_mq_srcu_sync_wait(struct blk_mq_srcu_sync *sync)
+{
+ wait_for_completion(&sync->srcu_sync.completion);
+ destroy_rcu_head_on_stack(&sync->srcu_sync.head);
+}
+
+static void blk_mq_wakeme_after_rcu(struct rcu_head *head)
+{
+ struct blk_mq_srcu_sync *sync;
+
+ sync = container_of(head, struct blk_mq_srcu_sync, srcu_sync.head);
+
+ if (atomic_dec_and_test(&sync->count))
+ complete(&sync->srcu_sync.completion);
+}
+
+static void blk_mq_quiesce_blocking_queue_async(struct request_queue *q,
+ struct blk_mq_srcu_sync *sync)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+
+ blk_mq_quiesce_queue_nowait(q);
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ WARN_ON_ONCE(!(hctx->flags & BLK_MQ_F_BLOCKING));
+ call_srcu(hctx->srcu, &sync->srcu_sync.head,
+ blk_mq_wakeme_after_rcu);
+ }
+}
+
/**
* blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
* @q: request queue.
@@ -2880,6 +2925,45 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared)
}
}
+void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
+{
+ struct request_queue *q;
+
+ mutex_lock(&set->tag_list_lock);
+ if (set->flags & BLK_MQ_F_BLOCKING) {
+ struct blk_mq_srcu_sync sync;
+ int count = 0;
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+ count++;
+
+ blk_mq_srcu_sync_init(&sync, count);
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+ blk_mq_quiesce_blocking_queue_async(q, &sync);
+
+ blk_mq_srcu_sync_wait(&sync);
+
+ } else {
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+ blk_mq_quiesce_queue_nowait(q);
+ synchronize_rcu();
+ }
+ mutex_unlock(&set->tag_list_lock);
+}
+EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);
+
+void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
+{
+ struct request_queue *q;
+
+ mutex_lock(&set->tag_list_lock);
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+ blk_mq_unquiesce_queue(q);
+ mutex_unlock(&set->tag_list_lock);
+}
+EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);
+
static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
bool shared)
{
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 23230c1d031e..d5e0974a1dcc 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -532,6 +532,8 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
+void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
+void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
unsigned int blk_mq_rq_cpu(struct request *rq);
--
Ming
_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme
next prev parent reply other threads:[~2020-07-28 9:17 UTC|newest]
Thread overview: 40+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-07-27 23:10 [PATCH v5 0/2] improve nvme quiesce time for large amount of namespaces Sagi Grimberg
2020-07-27 23:10 ` [PATCH v5 1/2] blk-mq: add tagset quiesce interface Sagi Grimberg
2020-07-27 23:32 ` Keith Busch
2020-07-28 0:12 ` Sagi Grimberg
2020-07-28 1:40 ` Ming Lei
2020-07-28 1:51 ` Jens Axboe
2020-07-28 2:17 ` Ming Lei
2020-07-28 2:23 ` Jens Axboe
2020-07-28 2:28 ` Ming Lei
2020-07-28 2:32 ` Jens Axboe
2020-07-28 3:29 ` Sagi Grimberg
2020-07-28 3:25 ` Sagi Grimberg
2020-07-28 7:18 ` Christoph Hellwig
2020-07-28 7:48 ` Sagi Grimberg
2020-07-28 9:16 ` Ming Lei [this message]
2020-07-28 9:24 ` Sagi Grimberg
2020-07-28 9:33 ` Ming Lei
2020-07-28 9:37 ` Sagi Grimberg
2020-07-28 9:43 ` Sagi Grimberg
2020-07-28 10:10 ` Ming Lei
2020-07-28 10:57 ` Christoph Hellwig
2020-07-28 14:13 ` Paul E. McKenney
2020-07-28 10:58 ` Christoph Hellwig
2020-07-28 16:25 ` Sagi Grimberg
2020-07-28 13:54 ` Paul E. McKenney
2020-07-28 23:46 ` Sagi Grimberg
2020-07-29 0:31 ` Paul E. McKenney
2020-07-29 0:43 ` Sagi Grimberg
2020-07-29 0:59 ` Keith Busch
2020-07-29 4:39 ` Sagi Grimberg
2020-08-07 9:04 ` Chao Leng
2020-08-07 9:24 ` Ming Lei
2020-08-07 9:35 ` Chao Leng
2020-07-29 4:10 ` Paul E. McKenney
2020-07-29 4:37 ` Sagi Grimberg
2020-07-27 23:10 ` [PATCH v5 2/2] nvme: use blk_mq_[un]quiesce_tagset Sagi Grimberg
2020-07-28 0:54 ` Sagi Grimberg
2020-07-28 3:21 ` Chao Leng
2020-07-28 3:34 ` Sagi Grimberg
2020-07-28 3:51 ` Chao Leng
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200728091633.GB1326626@T590 \
--to=ming.lei@redhat.com \
--cc=axboe@kernel.dk \
--cc=hch@lst.de \
--cc=kbusch@kernel.org \
--cc=lengchao@huawei.com \
--cc=linux-block@vger.kernel.org \
--cc=linux-nvme@lists.infradead.org \
--cc=mlin@kernel.org \
--cc=paulmck@kernel.org \
--cc=sagi@grimberg.me \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).