From: Jens Axboe <axboe@kernel.dk> To: linux-block@vger.kernel.org, linux-nvme@lists.infradead.org Cc: Jens Axboe <axboe@kernel.dk> Subject: [PATCH 7/8] blk-mq: use bd->last == true for list inserts Date: Mon, 26 Nov 2018 09:35:55 -0700 [thread overview] Message-ID: <20181126163556.5181-8-axboe@kernel.dk> (raw) In-Reply-To: <20181126163556.5181-1-axboe@kernel.dk> If we are issuing a list of requests, we know if we're at the last one. If we fail issuing, ensure that we call ->commit_rqs() to flush any potential previous requests. Signed-off-by: Jens Axboe <axboe@kernel.dk> --- block/blk-core.c | 2 +- block/blk-mq.c | 32 ++++++++++++++++++++++++-------- block/blk-mq.h | 2 +- 3 files changed, 26 insertions(+), 10 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index c9758d185357..808a65d23f1a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1334,7 +1334,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request * * bypass a potential scheduler on the bottom device for * insert. */ - return blk_mq_request_issue_directly(rq); + return blk_mq_request_issue_directly(rq, true); } EXPORT_SYMBOL_GPL(blk_insert_cloned_request); diff --git a/block/blk-mq.c b/block/blk-mq.c index 6a249bf6ed00..0a12cec0b426 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1260,6 +1260,14 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, if (!list_empty(list)) { bool needs_restart; + /* + * If we didn't flush the entire list, we could have told + * the driver there was more coming, but that turned out to + * be a lie. 
+ */ + if (q->mq_ops->commit_rqs) + q->mq_ops->commit_rqs(hctx); + spin_lock(&hctx->lock); list_splice_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); @@ -1736,12 +1744,12 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq) static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, - blk_qc_t *cookie) + blk_qc_t *cookie, bool last) { struct request_queue *q = rq->q; struct blk_mq_queue_data bd = { .rq = rq, - .last = true, + .last = last, }; blk_qc_t new_cookie; blk_status_t ret; @@ -1776,7 +1784,7 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_qc_t *cookie, - bool bypass_insert) + bool bypass_insert, bool last) { struct request_queue *q = rq->q; bool run_queue = true; @@ -1805,7 +1813,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, goto insert; } - return __blk_mq_issue_directly(hctx, rq, cookie); + return __blk_mq_issue_directly(hctx, rq, cookie, last); insert: if (bypass_insert) return BLK_STS_RESOURCE; @@ -1824,7 +1832,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, hctx_lock(hctx, &srcu_idx); - ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false); + ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) blk_mq_sched_insert_request(rq, false, true, false); else if (ret != BLK_STS_OK) @@ -1833,7 +1841,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, hctx_unlock(hctx, srcu_idx); } -blk_status_t blk_mq_request_issue_directly(struct request *rq) +blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) { blk_status_t ret; int srcu_idx; @@ -1841,7 +1849,7 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq) struct blk_mq_hw_ctx *hctx = rq->mq_hctx; hctx_lock(hctx, &srcu_idx); - ret = 
__blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true); + ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); hctx_unlock(hctx, srcu_idx); return ret; @@ -1856,7 +1864,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, queuelist); list_del_init(&rq->queuelist); - ret = blk_mq_request_issue_directly(rq); + ret = blk_mq_request_issue_directly(rq, list_empty(list)); if (ret != BLK_STS_OK) { if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { @@ -1866,6 +1874,14 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, blk_mq_end_request(rq, ret); } } + + /* + * If we didn't flush the entire list, we could have told + * the driver there was more coming, but that turned out to + * be a lie. + */ + if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs) + hctx->queue->mq_ops->commit_rqs(hctx); } static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) diff --git a/block/blk-mq.h b/block/blk-mq.h index 9ae8e9f8f8b1..7291e5379358 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -69,7 +69,7 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct list_head *list); /* Used by blk_insert_cloned_request() to issue request directly */ -blk_status_t blk_mq_request_issue_directly(struct request *rq); +blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last); void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, struct list_head *list); -- 2.17.1
WARNING: multiple messages have this Message-ID (diff)
From: axboe@kernel.dk (Jens Axboe) Subject: [PATCH 7/8] blk-mq: use bd->last == true for list inserts Date: Mon, 26 Nov 2018 09:35:55 -0700 [thread overview] Message-ID: <20181126163556.5181-8-axboe@kernel.dk> (raw) In-Reply-To: <20181126163556.5181-1-axboe@kernel.dk> If we are issuing a list of requests, we know if we're at the last one. If we fail issuing, ensure that we call ->commit_rqs() to flush any potential previous requests. Signed-off-by: Jens Axboe <axboe at kernel.dk> --- block/blk-core.c | 2 +- block/blk-mq.c | 32 ++++++++++++++++++++++++-------- block/blk-mq.h | 2 +- 3 files changed, 26 insertions(+), 10 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index c9758d185357..808a65d23f1a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1334,7 +1334,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request * * bypass a potential scheduler on the bottom device for * insert. */ - return blk_mq_request_issue_directly(rq); + return blk_mq_request_issue_directly(rq, true); } EXPORT_SYMBOL_GPL(blk_insert_cloned_request); diff --git a/block/blk-mq.c b/block/blk-mq.c index 6a249bf6ed00..0a12cec0b426 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1260,6 +1260,14 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, if (!list_empty(list)) { bool needs_restart; + /* + * If we didn't flush the entire list, we could have told + * the driver there was more coming, but that turned out to + * be a lie. 
+ */ + if (q->mq_ops->commit_rqs) + q->mq_ops->commit_rqs(hctx); + spin_lock(&hctx->lock); list_splice_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); @@ -1736,12 +1744,12 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq) static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, - blk_qc_t *cookie) + blk_qc_t *cookie, bool last) { struct request_queue *q = rq->q; struct blk_mq_queue_data bd = { .rq = rq, - .last = true, + .last = last, }; blk_qc_t new_cookie; blk_status_t ret; @@ -1776,7 +1784,7 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_qc_t *cookie, - bool bypass_insert) + bool bypass_insert, bool last) { struct request_queue *q = rq->q; bool run_queue = true; @@ -1805,7 +1813,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, goto insert; } - return __blk_mq_issue_directly(hctx, rq, cookie); + return __blk_mq_issue_directly(hctx, rq, cookie, last); insert: if (bypass_insert) return BLK_STS_RESOURCE; @@ -1824,7 +1832,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, hctx_lock(hctx, &srcu_idx); - ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false); + ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) blk_mq_sched_insert_request(rq, false, true, false); else if (ret != BLK_STS_OK) @@ -1833,7 +1841,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, hctx_unlock(hctx, srcu_idx); } -blk_status_t blk_mq_request_issue_directly(struct request *rq) +blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) { blk_status_t ret; int srcu_idx; @@ -1841,7 +1849,7 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq) struct blk_mq_hw_ctx *hctx = rq->mq_hctx; hctx_lock(hctx, &srcu_idx); - ret = 
__blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true); + ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); hctx_unlock(hctx, srcu_idx); return ret; @@ -1856,7 +1864,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, queuelist); list_del_init(&rq->queuelist); - ret = blk_mq_request_issue_directly(rq); + ret = blk_mq_request_issue_directly(rq, list_empty(list)); if (ret != BLK_STS_OK) { if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { @@ -1866,6 +1874,14 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, blk_mq_end_request(rq, ret); } } + + /* + * If we didn't flush the entire list, we could have told + * the driver there was more coming, but that turned out to + * be a lie. + */ + if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs) + hctx->queue->mq_ops->commit_rqs(hctx); } static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) diff --git a/block/blk-mq.h b/block/blk-mq.h index 9ae8e9f8f8b1..7291e5379358 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -69,7 +69,7 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct list_head *list); /* Used by blk_insert_cloned_request() to issue request directly */ -blk_status_t blk_mq_request_issue_directly(struct request *rq); +blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last); void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, struct list_head *list); -- 2.17.1
next prev parent reply other threads:[~2018-11-26 16:36 UTC|newest] Thread overview: 84+ messages / expand[flat|nested] mbox.gz Atom feed top 2018-11-26 16:35 [PATCHSET 0/8] block plugging improvements Jens Axboe 2018-11-26 16:35 ` Jens Axboe 2018-11-26 16:35 ` [PATCH 1/8] block: sum requests in the plug structure Jens Axboe 2018-11-26 16:35 ` Jens Axboe 2018-11-26 17:02 ` Christoph Hellwig 2018-11-26 17:02 ` Christoph Hellwig 2018-11-26 16:35 ` [PATCH 2/8] block: improve logic around when to sort a plug list Jens Axboe 2018-11-26 16:35 ` Jens Axboe 2018-11-27 23:31 ` Omar Sandoval 2018-11-27 23:31 ` Omar Sandoval 2018-11-27 23:49 ` Jens Axboe 2018-11-27 23:49 ` Jens Axboe 2018-11-27 23:55 ` Omar Sandoval 2018-11-27 23:55 ` Omar Sandoval 2018-11-27 23:59 ` Jens Axboe 2018-11-27 23:59 ` Jens Axboe 2018-11-28 0:05 ` Omar Sandoval 2018-11-28 0:05 ` Omar Sandoval 2018-11-28 0:16 ` Jens Axboe 2018-11-28 0:16 ` Jens Axboe 2018-11-26 16:35 ` [PATCH 3/8] blk-mq: add mq_ops->commit_rqs() Jens Axboe 2018-11-26 16:35 ` Jens Axboe 2018-11-27 23:43 ` Omar Sandoval 2018-11-27 23:43 ` Omar Sandoval 2018-11-28 1:38 ` Ming Lei 2018-11-28 1:38 ` Ming Lei 2018-11-28 7:16 ` Christoph Hellwig 2018-11-28 7:16 ` Christoph Hellwig 2018-11-28 12:54 ` Jens Axboe 2018-11-28 12:54 ` Jens Axboe 2018-11-26 16:35 ` [PATCH 4/8] nvme: implement mq_ops->commit_rqs() hook Jens Axboe 2018-11-26 16:35 ` Jens Axboe 2018-11-28 7:20 ` Christoph Hellwig 2018-11-28 7:20 ` Christoph Hellwig 2018-11-28 13:07 ` Jens Axboe 2018-11-28 13:07 ` Jens Axboe 2018-11-26 16:35 ` [PATCH 5/8] virtio_blk: " Jens Axboe 2018-11-26 16:35 ` Jens Axboe 2018-11-27 23:45 ` Omar Sandoval 2018-11-27 23:45 ` Omar Sandoval 2018-11-28 3:05 ` Michael S. Tsirkin 2018-11-28 3:05 ` Michael S. 
Tsirkin 2018-11-28 2:10 ` Ming Lei 2018-11-28 2:10 ` Ming Lei 2018-11-28 2:34 ` Jens Axboe 2018-11-28 2:34 ` Jens Axboe 2018-11-29 1:23 ` Ming Lei 2018-11-29 1:23 ` Ming Lei 2018-11-29 2:19 ` Jens Axboe 2018-11-29 2:19 ` Jens Axboe 2018-11-29 2:51 ` Ming Lei 2018-11-29 2:51 ` Ming Lei 2018-11-29 3:13 ` Jens Axboe 2018-11-29 3:13 ` Jens Axboe 2018-11-29 3:27 ` Ming Lei 2018-11-29 3:27 ` Ming Lei 2018-11-29 3:53 ` Jens Axboe 2018-11-29 3:53 ` Jens Axboe 2018-11-28 7:21 ` Christoph Hellwig 2018-11-28 7:21 ` Christoph Hellwig 2018-11-26 16:35 ` [PATCH 6/8] ataflop: " Jens Axboe 2018-11-26 16:35 ` Jens Axboe 2018-11-27 23:46 ` Omar Sandoval 2018-11-27 23:46 ` Omar Sandoval 2018-11-28 7:22 ` Christoph Hellwig 2018-11-28 7:22 ` Christoph Hellwig 2018-11-28 13:09 ` Jens Axboe 2018-11-28 13:09 ` Jens Axboe 2018-11-26 16:35 ` Jens Axboe [this message] 2018-11-26 16:35 ` [PATCH 7/8] blk-mq: use bd->last == true for list inserts Jens Axboe 2018-11-27 23:49 ` Omar Sandoval 2018-11-27 23:49 ` Omar Sandoval 2018-11-27 23:51 ` Jens Axboe 2018-11-27 23:51 ` Jens Axboe 2018-11-28 1:49 ` Ming Lei 2018-11-28 1:49 ` Ming Lei 2018-11-28 2:37 ` Jens Axboe 2018-11-28 2:37 ` Jens Axboe 2018-11-26 16:35 ` [PATCH 8/8] blk-mq: add plug case for devices that implement ->commits_rqs() Jens Axboe 2018-11-26 16:35 ` Jens Axboe 2018-11-28 7:26 ` Christoph Hellwig 2018-11-28 7:26 ` Christoph Hellwig 2018-11-28 13:11 ` Jens Axboe 2018-11-28 13:11 ` Jens Axboe
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20181126163556.5181-8-axboe@kernel.dk \ --to=axboe@kernel.dk \ --cc=linux-block@vger.kernel.org \ --cc=linux-nvme@lists.infradead.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: linkBe sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.