* unify and streamline the blk-mq make_request implementations V2
@ 2017-03-20 20:39 Christoph Hellwig
2017-03-20 20:39 ` [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE Christoph Hellwig
` (4 more replies)
0 siblings, 5 replies; 14+ messages in thread
From: Christoph Hellwig @ 2017-03-20 20:39 UTC (permalink / raw)
To: axboe; +Cc: Bart.VanAssche, linux-block
A bunch of cleanups to get us a nice I/O submission path.
Changes since V1:
- rebase on top of the recent blk_mq_try_issue_directly changes
- incorporate comments from Bart
^ permalink raw reply [flat|nested] 14+ messages in thread
* [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE
2017-03-20 20:39 unify and streamline the blk-mq make_request implementations V2 Christoph Hellwig
@ 2017-03-20 20:39 ` Christoph Hellwig
2017-03-21 1:11 ` Bart Van Assche
2017-03-21 13:33 ` Johannes Thumshirn
2017-03-20 20:39 ` [PATCH 2/4] blk-mq: merge mq and sq make_request instances Christoph Hellwig
` (3 subsequent siblings)
4 siblings, 2 replies; 14+ messages in thread
From: Christoph Hellwig @ 2017-03-20 20:39 UTC (permalink / raw)
To: axboe; +Cc: Bart.VanAssche, linux-block
This flag was never used since it was introduced.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
block/blk-mq.c | 8 +-------
include/linux/blk-mq.h | 1 -
2 files changed, 1 insertion(+), 8 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 159187a28d66..acf0ddf4af52 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1534,13 +1534,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
}
plug = current->plug;
- /*
- * If the driver supports defer issued based on 'last', then
- * queue it up like normal since we can potentially save some
- * CPU this way.
- */
- if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
- !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
+ if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
struct request *old_rq = NULL;
blk_mq_bio_to_request(rq, bio);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b296a9006117..5b3e201c8d4f 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -152,7 +152,6 @@ enum {
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_TAG_SHARED = 1 << 1,
BLK_MQ_F_SG_MERGE = 1 << 2,
- BLK_MQ_F_DEFER_ISSUE = 1 << 4,
BLK_MQ_F_BLOCKING = 1 << 5,
BLK_MQ_F_NO_SCHED = 1 << 6,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
--
2.11.0
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 2/4] blk-mq: merge mq and sq make_request instances
2017-03-20 20:39 unify and streamline the blk-mq make_request implementations V2 Christoph Hellwig
2017-03-20 20:39 ` [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE Christoph Hellwig
@ 2017-03-20 20:39 ` Christoph Hellwig
2017-03-21 1:33 ` Bart Van Assche
2017-03-20 20:39 ` [PATCH 3/4] blk-mq: improve blk_mq_try_issue_directly Christoph Hellwig
` (2 subsequent siblings)
4 siblings, 1 reply; 14+ messages in thread
From: Christoph Hellwig @ 2017-03-20 20:39 UTC (permalink / raw)
To: axboe; +Cc: Bart.VanAssche, linux-block
They are mostly the same code anyway - this leaves just one small conditional
for the plug case that is different for both variants.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
block/blk-mq.c | 164 +++++++++++----------------------------------------------
1 file changed, 31 insertions(+), 133 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index acf0ddf4af52..53e49a3f6f0a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1478,11 +1478,6 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
blk_mq_sched_insert_request(rq, false, true, true, false);
}
-/*
- * Multiple hardware queue variant. This will not use per-process plugs,
- * but will attempt to bypass the hctx queueing if we can go straight to
- * hardware for SYNC IO.
- */
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
@@ -1534,7 +1529,36 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
}
plug = current->plug;
- if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
+ if (plug && q->nr_hw_queues == 1) {
+ struct request *last = NULL;
+
+ blk_mq_bio_to_request(rq, bio);
+
+ /*
+ * @request_count may become stale because of schedule
+ * out, so check the list again.
+ */
+ if (list_empty(&plug->mq_list))
+ request_count = 0;
+ else if (blk_queue_nomerges(q))
+ request_count = blk_plug_queued_count(q);
+
+ if (!request_count)
+ trace_block_plug(q);
+ else
+ last = list_entry_rq(plug->mq_list.prev);
+
+ blk_mq_put_ctx(data.ctx);
+
+ if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
+ blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
+ blk_flush_plug_list(plug, false);
+ trace_block_plug(q);
+ }
+
+ list_add_tail(&rq->queuelist, &plug->mq_list);
+ goto done;
+ } else if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
struct request *old_rq = NULL;
blk_mq_bio_to_request(rq, bio);
@@ -1596,119 +1620,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
return cookie;
}
-/*
- * Single hardware queue variant. This will attempt to use any per-process
- * plug for merging and IO deferral.
- */
-static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
-{
- const int is_sync = op_is_sync(bio->bi_opf);
- const int is_flush_fua = op_is_flush(bio->bi_opf);
- struct blk_plug *plug;
- unsigned int request_count = 0;
- struct blk_mq_alloc_data data = { .flags = 0 };
- struct request *rq;
- blk_qc_t cookie;
- unsigned int wb_acct;
-
- blk_queue_bounce(q, &bio);
-
- if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
- bio_io_error(bio);
- return BLK_QC_T_NONE;
- }
-
- blk_queue_split(q, &bio, q->bio_split);
-
- if (!is_flush_fua && !blk_queue_nomerges(q)) {
- if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
- return BLK_QC_T_NONE;
- } else
- request_count = blk_plug_queued_count(q);
-
- if (blk_mq_sched_bio_merge(q, bio))
- return BLK_QC_T_NONE;
-
- wb_acct = wbt_wait(q->rq_wb, bio, NULL);
-
- trace_block_getrq(q, bio, bio->bi_opf);
-
- rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
- if (unlikely(!rq)) {
- __wbt_done(q->rq_wb, wb_acct);
- return BLK_QC_T_NONE;
- }
-
- wbt_track(&rq->issue_stat, wb_acct);
-
- cookie = request_to_qc_t(data.hctx, rq);
-
- if (unlikely(is_flush_fua)) {
- if (q->elevator)
- goto elv_insert;
- blk_mq_bio_to_request(rq, bio);
- blk_insert_flush(rq);
- goto run_queue;
- }
-
- /*
- * A task plug currently exists. Since this is completely lockless,
- * utilize that to temporarily store requests until the task is
- * either done or scheduled away.
- */
- plug = current->plug;
- if (plug) {
- struct request *last = NULL;
-
- blk_mq_bio_to_request(rq, bio);
-
- /*
- * @request_count may become stale because of schedule
- * out, so check the list again.
- */
- if (list_empty(&plug->mq_list))
- request_count = 0;
- if (!request_count)
- trace_block_plug(q);
- else
- last = list_entry_rq(plug->mq_list.prev);
-
- blk_mq_put_ctx(data.ctx);
-
- if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
- blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
- blk_flush_plug_list(plug, false);
- trace_block_plug(q);
- }
-
- list_add_tail(&rq->queuelist, &plug->mq_list);
- return cookie;
- }
-
- if (q->elevator) {
-elv_insert:
- blk_mq_put_ctx(data.ctx);
- blk_mq_bio_to_request(rq, bio);
- blk_mq_sched_insert_request(rq, false, true,
- !is_sync || is_flush_fua, true);
- goto done;
- }
- if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
- /*
- * For a SYNC request, send it to the hardware immediately. For
- * an ASYNC request, just ensure that we run it later on. The
- * latter allows for merging opportunities and more efficient
- * dispatching.
- */
-run_queue:
- blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
- }
-
- blk_mq_put_ctx(data.ctx);
-done:
- return cookie;
-}
-
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx)
{
@@ -2366,10 +2277,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
INIT_LIST_HEAD(&q->requeue_list);
spin_lock_init(&q->requeue_lock);
- if (q->nr_hw_queues > 1)
- blk_queue_make_request(q, blk_mq_make_request);
- else
- blk_queue_make_request(q, blk_sq_make_request);
+ blk_queue_make_request(q, blk_mq_make_request);
/*
* Do this after blk_queue_make_request() overrides it...
@@ -2717,16 +2625,6 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
set->nr_hw_queues = nr_hw_queues;
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_realloc_hw_ctxs(set, q);
-
- /*
- * Manually set the make_request_fn as blk_queue_make_request
- * resets a lot of the queue settings.
- */
- if (q->nr_hw_queues > 1)
- q->make_request_fn = blk_mq_make_request;
- else
- q->make_request_fn = blk_sq_make_request;
-
blk_mq_queue_reinit(q, cpu_online_mask);
}
--
2.11.0
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 3/4] blk-mq: improve blk_mq_try_issue_directly
2017-03-20 20:39 unify and streamline the blk-mq make_request implementations V2 Christoph Hellwig
2017-03-20 20:39 ` [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE Christoph Hellwig
2017-03-20 20:39 ` [PATCH 2/4] blk-mq: merge mq and sq make_request instances Christoph Hellwig
@ 2017-03-20 20:39 ` Christoph Hellwig
2017-03-21 1:35 ` Bart Van Assche
2017-03-20 20:39 ` [PATCH 4/4] blk-mq: streamline blk_mq_make_request Christoph Hellwig
2017-03-21 12:40 ` unify and streamline the blk-mq make_request implementations V2 Bart Van Assche
4 siblings, 1 reply; 14+ messages in thread
From: Christoph Hellwig @ 2017-03-20 20:39 UTC (permalink / raw)
To: axboe; +Cc: Bart.VanAssche, linux-block
Rename blk_mq_try_issue_directly to __blk_mq_try_issue_directly and add a
new wrapper that takes care of RCU / SRCU locking to avoid having
boilerplate code in the caller which would get duplicated with new callers.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
block/blk-mq.c | 32 ++++++++++++++++++--------------
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 53e49a3f6f0a..48748cb799ed 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1434,7 +1434,7 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
}
-static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
+static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
{
struct request_queue *q = rq->q;
struct blk_mq_queue_data bd = {
@@ -1478,13 +1478,27 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
blk_mq_sched_insert_request(rq, false, true, true, false);
}
+static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+ struct request *rq, blk_qc_t *cookie)
+{
+ if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
+ rcu_read_lock();
+ __blk_mq_try_issue_directly(rq, cookie);
+ rcu_read_unlock();
+ } else {
+ unsigned int srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
+ __blk_mq_try_issue_directly(rq, cookie);
+ srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
+ }
+}
+
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = op_is_flush(bio->bi_opf);
struct blk_mq_alloc_data data = { .flags = 0 };
struct request *rq;
- unsigned int request_count = 0, srcu_idx;
+ unsigned int request_count = 0;
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
blk_qc_t cookie;
@@ -1582,18 +1596,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
} else /* is_sync */
old_rq = rq;
blk_mq_put_ctx(data.ctx);
- if (!old_rq)
- goto done;
-
- if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
- rcu_read_lock();
- blk_mq_try_issue_directly(old_rq, &cookie);
- rcu_read_unlock();
- } else {
- srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
- blk_mq_try_issue_directly(old_rq, &cookie);
- srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
- }
+ if (old_rq)
+ blk_mq_try_issue_directly(data.hctx, old_rq, &cookie);
goto done;
}
--
2.11.0
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 4/4] blk-mq: streamline blk_mq_make_request
2017-03-20 20:39 unify and streamline the blk-mq make_request implementations V2 Christoph Hellwig
` (2 preceding siblings ...)
2017-03-20 20:39 ` [PATCH 3/4] blk-mq: improve blk_mq_try_issue_directly Christoph Hellwig
@ 2017-03-20 20:39 ` Christoph Hellwig
2017-03-21 12:40 ` unify and streamline the blk-mq make_request implementations V2 Bart Van Assche
4 siblings, 0 replies; 14+ messages in thread
From: Christoph Hellwig @ 2017-03-20 20:39 UTC (permalink / raw)
To: axboe; +Cc: Bart.VanAssche, linux-block
Turn the different ways of merging or issuing I/O into a series of if/else
statements instead of the current maze of gotos. Note that this means we
pin the CPU a little longer for some cases as the CTX put is moved to
common code at the end of the function.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
block/blk-mq.c | 67 +++++++++++++++++++++++-----------------------------------
1 file changed, 27 insertions(+), 40 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 48748cb799ed..18e449cc832f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1534,16 +1534,17 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
cookie = request_to_qc_t(data.hctx, rq);
+ plug = current->plug;
if (unlikely(is_flush_fua)) {
- if (q->elevator)
- goto elv_insert;
blk_mq_bio_to_request(rq, bio);
- blk_insert_flush(rq);
- goto run_queue;
- }
-
- plug = current->plug;
- if (plug && q->nr_hw_queues == 1) {
+ if (q->elevator) {
+ blk_mq_sched_insert_request(rq, false, true,
+ !is_sync || is_flush_fua, true);
+ } else {
+ blk_insert_flush(rq);
+ blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
+ }
+ } else if (plug && q->nr_hw_queues == 1) {
struct request *last = NULL;
blk_mq_bio_to_request(rq, bio);
@@ -1562,8 +1563,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
else
last = list_entry_rq(plug->mq_list.prev);
- blk_mq_put_ctx(data.ctx);
-
if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
blk_flush_plug_list(plug, false);
@@ -1571,56 +1570,44 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
}
list_add_tail(&rq->queuelist, &plug->mq_list);
- goto done;
- } else if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
- struct request *old_rq = NULL;
-
+ } else if (plug && !blk_queue_nomerges(q)) {
blk_mq_bio_to_request(rq, bio);
/*
* We do limited plugging. If the bio can be merged, do that.
* Otherwise the existing request in the plug list will be
* issued. So the plug list will have one request at most
+ *
+ * The plug list might get flushed before this. If that happens,
+ * the plug list is empty and same_queue_rq is invalid.
*/
- if (plug) {
- /*
- * The plug list might get flushed before this. If that
- * happens, same_queue_rq is invalid and plug list is
- * empty
- */
- if (same_queue_rq && !list_empty(&plug->mq_list)) {
- old_rq = same_queue_rq;
- list_del_init(&old_rq->queuelist);
- }
- list_add_tail(&rq->queuelist, &plug->mq_list);
- } else /* is_sync */
- old_rq = rq;
- blk_mq_put_ctx(data.ctx);
- if (old_rq)
- blk_mq_try_issue_directly(data.hctx, old_rq, &cookie);
- goto done;
- }
+ if (!list_empty(&plug->mq_list))
+ list_del_init(&same_queue_rq->queuelist);
+ else
+ same_queue_rq = NULL;
- if (q->elevator) {
-elv_insert:
- blk_mq_put_ctx(data.ctx);
+ list_add_tail(&rq->queuelist, &plug->mq_list);
+ if (same_queue_rq)
+ blk_mq_try_issue_directly(data.hctx, same_queue_rq,
+ &cookie);
+ } else if (is_sync) {
+ blk_mq_bio_to_request(rq, bio);
+ blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+ } else if (q->elevator) {
blk_mq_bio_to_request(rq, bio);
blk_mq_sched_insert_request(rq, false, true,
!is_sync || is_flush_fua, true);
- goto done;
- }
- if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+ } else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
/*
* For a SYNC request, send it to the hardware immediately. For
* an ASYNC request, just ensure that we run it later on. The
* latter allows for merging opportunities and more efficient
* dispatching.
*/
-run_queue:
blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
}
+
blk_mq_put_ctx(data.ctx);
-done:
return cookie;
}
--
2.11.0
^ permalink raw reply related [flat|nested] 14+ messages in thread
* Re: [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE
2017-03-20 20:39 ` [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE Christoph Hellwig
@ 2017-03-21 1:11 ` Bart Van Assche
2017-03-21 13:33 ` Johannes Thumshirn
1 sibling, 0 replies; 14+ messages in thread
From: Bart Van Assche @ 2017-03-21 1:11 UTC (permalink / raw)
To: Christoph Hellwig, axboe; +Cc: Bart Van Assche, linux-block
On 03/20/2017 04:39 PM, Christoph Hellwig wrote:
> This flag was never used since it was introduced.

Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH 2/4] blk-mq: merge mq and sq make_request instances
2017-03-20 20:39 ` [PATCH 2/4] blk-mq: merge mq and sq make_request instances Christoph Hellwig
@ 2017-03-21 1:33 ` Bart Van Assche
0 siblings, 0 replies; 14+ messages in thread
From: Bart Van Assche @ 2017-03-21 1:33 UTC (permalink / raw)
To: Christoph Hellwig, axboe; +Cc: Bart Van Assche, linux-block
On 03/20/2017 04:39 PM, Christoph Hellwig wrote:
> @@ -1534,7 +1529,36 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
> }
>
> plug = current->plug;
> - if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
> + if (plug && q->nr_hw_queues == 1) {
> + [ ... ]
> + } else if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
> struct request *old_rq = NULL;
>
> blk_mq_bio_to_request(rq, bio);

I think this patch will change the behavior for the plug == NULL &&
q->nr_hw_queues == 1 && is_sync case: with this patch applied the code
under "else if" will be executed for that case but that wasn't the case
before this patch.

Bart.
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH 3/4] blk-mq: improve blk_mq_try_issue_directly
2017-03-20 20:39 ` [PATCH 3/4] blk-mq: improve blk_mq_try_issue_directly Christoph Hellwig
@ 2017-03-21 1:35 ` Bart Van Assche
0 siblings, 0 replies; 14+ messages in thread
From: Bart Van Assche @ 2017-03-21 1:35 UTC (permalink / raw)
To: Christoph Hellwig, axboe; +Cc: Bart Van Assche, linux-block
On 03/20/2017 04:39 PM, Christoph Hellwig wrote:
> Rename blk_mq_try_issue_directly to __blk_mq_try_issue_directly and add a
> new wrapper that takes care of RCU / SRCU locking to avoid having
> boilerplate code in the caller which would get duplicated with new callers.

Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: unify and streamline the blk-mq make_request implementations V2
2017-03-20 20:39 unify and streamline the blk-mq make_request implementations V2 Christoph Hellwig
` (3 preceding siblings ...)
2017-03-20 20:39 ` [PATCH 4/4] blk-mq: streamline blk_mq_make_request Christoph Hellwig
@ 2017-03-21 12:40 ` Bart Van Assche
2017-03-22 18:59 ` hch
4 siblings, 1 reply; 14+ messages in thread
From: Bart Van Assche @ 2017-03-21 12:40 UTC (permalink / raw)
To: hch, axboe; +Cc: linux-block
On Mon, 2017-03-20 at 16:39 -0400, Christoph Hellwig wrote:
> Changes since V1:
>  - rebase on top of the recent blk_mq_try_issue_directly changes
>  - incorporate comments from Bart
Hi Christoph,
It seems to me like none of the three comments I had posted on patch 4/4
have been addressed. Please have another look at these comments.
Thanks,
Bart.
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE
2017-03-20 20:39 ` [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE Christoph Hellwig
2017-03-21 1:11 ` Bart Van Assche
@ 2017-03-21 13:33 ` Johannes Thumshirn
1 sibling, 0 replies; 14+ messages in thread
From: Johannes Thumshirn @ 2017-03-21 13:33 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: axboe, Bart.VanAssche, linux-block
On Mon, Mar 20, 2017 at 04:39:27PM -0400, Christoph Hellwig wrote:
> This flag was never used since it was introduced.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
--
Johannes Thumshirn Storage
jthumshirn@suse.de +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: unify and streamline the blk-mq make_request implementations V2
2017-03-21 12:40 ` unify and streamline the blk-mq make_request implementations V2 Bart Van Assche
@ 2017-03-22 18:59 ` hch
0 siblings, 0 replies; 14+ messages in thread
From: hch @ 2017-03-22 18:59 UTC (permalink / raw)
To: Bart Van Assche; +Cc: hch, axboe, linux-block
On Tue, Mar 21, 2017 at 12:40:17PM +0000, Bart Van Assche wrote:
> On Mon, 2017-03-20 at 16:39 -0400, Christoph Hellwig wrote:
> > Changes since V1:
> >  - rebase on top of the recent blk_mq_try_issue_directly changes
> >  - incorporate comments from Bart
>
> Hi Christoph,
>
> It seems to me like none of the three comments I had posted on patch 4/4
> have been addressed. Please have another look at these comments.
I had a look but resend the same old series again due to a rebase bug.
I'll resend it ASAP with what should address your comments to the previous
series as well as this one.
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE
2017-03-13 20:52 ` Bart Van Assche
@ 2017-03-13 23:16 ` hch
0 siblings, 0 replies; 14+ messages in thread
From: hch @ 2017-03-13 23:16 UTC (permalink / raw)
To: Bart Van Assche; +Cc: hch, axboe, linux-block
On Mon, Mar 13, 2017 at 08:52:54PM +0000, Bart Van Assche wrote:
> > - if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
> > - !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
> > + if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
>
> A minor comment: due to this change the outer pair of parentheses
> became superfluous. Please consider removing these.
The last patch in the series removes the statement in this form. But
if I have to respin the series for some reason I'll make sure it
gets removed here already.
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE
2017-03-13 15:48 ` [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE Christoph Hellwig
@ 2017-03-13 20:52 ` Bart Van Assche
2017-03-13 23:16 ` hch
0 siblings, 1 reply; 14+ messages in thread
From: Bart Van Assche @ 2017-03-13 20:52 UTC (permalink / raw)
To: hch, axboe; +Cc: linux-block
On Mon, 2017-03-13 at 09:48 -0600, Christoph Hellwig wrote:
> This flag was never used since it was introduced.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> block/blk-mq.c | 8 +-------
> include/linux/blk-mq.h | 1 -
> 2 files changed, 1 insertion(+), 8 deletions(-)
>
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 159187a28d66..acf0ddf4af52 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -1534,13 +1534,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
> }
>
> plug = current->plug;
> - /*
> - * If the driver supports defer issued based on 'last', then
> - * queue it up like normal since we can potentially save some
> - * CPU this way.
> - */
> - if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
> - !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
> + if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
A minor comment: due to this change the outer pair of parentheses
became superfluous. Please consider removing these.
Thanks,
Bart.
^ permalink raw reply [flat|nested] 14+ messages in thread
* [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE
2017-03-13 15:48 unify and streamline the blk-mq make_request implementations Christoph Hellwig
@ 2017-03-13 15:48 ` Christoph Hellwig
2017-03-13 20:52 ` Bart Van Assche
0 siblings, 1 reply; 14+ messages in thread
From: Christoph Hellwig @ 2017-03-13 15:48 UTC (permalink / raw)
To: axboe; +Cc: linux-block
This flag was never used since it was introduced.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
block/blk-mq.c | 8 +-------
include/linux/blk-mq.h | 1 -
2 files changed, 1 insertion(+), 8 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 159187a28d66..acf0ddf4af52 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1534,13 +1534,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
}
plug = current->plug;
- /*
- * If the driver supports defer issued based on 'last', then
- * queue it up like normal since we can potentially save some
- * CPU this way.
- */
- if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
- !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
+ if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
struct request *old_rq = NULL;
blk_mq_bio_to_request(rq, bio);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b296a9006117..5b3e201c8d4f 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -152,7 +152,6 @@ enum {
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_TAG_SHARED = 1 << 1,
BLK_MQ_F_SG_MERGE = 1 << 2,
- BLK_MQ_F_DEFER_ISSUE = 1 << 4,
BLK_MQ_F_BLOCKING = 1 << 5,
BLK_MQ_F_NO_SCHED = 1 << 6,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
--
2.11.0
^ permalink raw reply related [flat|nested] 14+ messages in thread
end of thread, other threads:[~2017-03-22 18:59 UTC | newest]
Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-03-20 20:39 unify and streamline the blk-mq make_request implementations V2 Christoph Hellwig
2017-03-20 20:39 ` [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE Christoph Hellwig
2017-03-21 1:11 ` Bart Van Assche
2017-03-21 13:33 ` Johannes Thumshirn
2017-03-20 20:39 ` [PATCH 2/4] blk-mq: merge mq and sq make_request instances Christoph Hellwig
2017-03-21 1:33 ` Bart Van Assche
2017-03-20 20:39 ` [PATCH 3/4] blk-mq: improve blk_mq_try_issue_directly Christoph Hellwig
2017-03-21 1:35 ` Bart Van Assche
2017-03-20 20:39 ` [PATCH 4/4] blk-mq: streamline blk_mq_make_request Christoph Hellwig
2017-03-21 12:40 ` unify and streamline the blk-mq make_request implementations V2 Bart Van Assche
2017-03-22 18:59 ` hch
-- strict thread matches above, loose matches on Subject: below --
2017-03-13 15:48 unify and streamline the blk-mq make_request implementations Christoph Hellwig
2017-03-13 15:48 ` [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE Christoph Hellwig
2017-03-13 20:52 ` Bart Van Assche
2017-03-13 23:16 ` hch
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.