* unify and streamline the blk-mq make_request implementations
@ 2017-03-13 15:48 Christoph Hellwig
  2017-03-13 15:48 ` [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE Christoph Hellwig
                   ` (3 more replies)
  0 siblings, 4 replies; 11+ messages in thread
From: Christoph Hellwig @ 2017-03-13 15:48 UTC (permalink / raw)
  To: axboe; +Cc: linux-block

A bunch of cleanups to get us a nice I/O submission path.


* [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE
  2017-03-13 15:48 unify and streamline the blk-mq make_request implementations Christoph Hellwig
@ 2017-03-13 15:48 ` Christoph Hellwig
  2017-03-13 20:52   ` Bart Van Assche
  2017-03-13 15:48 ` [PATCH 2/4] blk-mq: merge mq and sq make_request instances Christoph Hellwig
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 11+ messages in thread
From: Christoph Hellwig @ 2017-03-13 15:48 UTC (permalink / raw)
  To: axboe; +Cc: linux-block

This flag was never used since it was introduced.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c         | 8 +-------
 include/linux/blk-mq.h | 1 -
 2 files changed, 1 insertion(+), 8 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 159187a28d66..acf0ddf4af52 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1534,13 +1534,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	plug = current->plug;
-	/*
-	 * If the driver supports defer issued based on 'last', then
-	 * queue it up like normal since we can potentially save some
-	 * CPU this way.
-	 */
-	if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
-	    !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
+	if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
 		struct request *old_rq = NULL;
 
 		blk_mq_bio_to_request(rq, bio);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b296a9006117..5b3e201c8d4f 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -152,7 +152,6 @@ enum {
 	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
 	BLK_MQ_F_TAG_SHARED	= 1 << 1,
 	BLK_MQ_F_SG_MERGE	= 1 << 2,
-	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
 	BLK_MQ_F_BLOCKING	= 1 << 5,
 	BLK_MQ_F_NO_SCHED	= 1 << 6,
 	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
-- 
2.11.0


* [PATCH 2/4] blk-mq: merge mq and sq make_request instances
  2017-03-13 15:48 unify and streamline the blk-mq make_request implementations Christoph Hellwig
  2017-03-13 15:48 ` [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE Christoph Hellwig
@ 2017-03-13 15:48 ` Christoph Hellwig
  2017-03-13 21:01   ` Bart Van Assche
  2017-03-13 15:48 ` [PATCH 3/4] blk-mq: improve blk_mq_try_issue_directly Christoph Hellwig
  2017-03-13 15:48 ` [PATCH 4/4] blk-mq: streamline blk_mq_make_request Christoph Hellwig
  3 siblings, 1 reply; 11+ messages in thread
From: Christoph Hellwig @ 2017-03-13 15:48 UTC (permalink / raw)
  To: axboe; +Cc: linux-block

They are mostly the same code anyway - it's just one small conditional
for the plug case that differs between the two variants.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 164 +++++++++++----------------------------------------------
 1 file changed, 31 insertions(+), 133 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index acf0ddf4af52..53e49a3f6f0a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1478,11 +1478,6 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
 	blk_mq_sched_insert_request(rq, false, true, true, false);
 }
 
-/*
- * Multiple hardware queue variant. This will not use per-process plugs,
- * but will attempt to bypass the hctx queueing if we can go straight to
- * hardware for SYNC IO.
- */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
@@ -1534,7 +1529,36 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	plug = current->plug;
-	if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
+	if (plug && q->nr_hw_queues == 1) {
+		struct request *last = NULL;
+
+		blk_mq_bio_to_request(rq, bio);
+
+		/*
+		 * @request_count may become stale because of schedule
+		 * out, so check the list again.
+		 */
+		if (list_empty(&plug->mq_list))
+			request_count = 0;
+		else if (blk_queue_nomerges(q))
+			request_count = blk_plug_queued_count(q);
+
+		if (!request_count)
+			trace_block_plug(q);
+		else
+			last = list_entry_rq(plug->mq_list.prev);
+
+		blk_mq_put_ctx(data.ctx);
+
+		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
+		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
+			blk_flush_plug_list(plug, false);
+			trace_block_plug(q);
+		}
+
+		list_add_tail(&rq->queuelist, &plug->mq_list);
+		goto done;
+	} else if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
 		struct request *old_rq = NULL;
 
 		blk_mq_bio_to_request(rq, bio);
@@ -1596,119 +1620,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	return cookie;
 }
 
-/*
- * Single hardware queue variant. This will attempt to use any per-process
- * plug for merging and IO deferral.
- */
-static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
-{
-	const int is_sync = op_is_sync(bio->bi_opf);
-	const int is_flush_fua = op_is_flush(bio->bi_opf);
-	struct blk_plug *plug;
-	unsigned int request_count = 0;
-	struct blk_mq_alloc_data data = { .flags = 0 };
-	struct request *rq;
-	blk_qc_t cookie;
-	unsigned int wb_acct;
-
-	blk_queue_bounce(q, &bio);
-
-	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		bio_io_error(bio);
-		return BLK_QC_T_NONE;
-	}
-
-	blk_queue_split(q, &bio, q->bio_split);
-
-	if (!is_flush_fua && !blk_queue_nomerges(q)) {
-		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
-			return BLK_QC_T_NONE;
-	} else
-		request_count = blk_plug_queued_count(q);
-
-	if (blk_mq_sched_bio_merge(q, bio))
-		return BLK_QC_T_NONE;
-
-	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
-
-	trace_block_getrq(q, bio, bio->bi_opf);
-
-	rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
-	if (unlikely(!rq)) {
-		__wbt_done(q->rq_wb, wb_acct);
-		return BLK_QC_T_NONE;
-	}
-
-	wbt_track(&rq->issue_stat, wb_acct);
-
-	cookie = request_to_qc_t(data.hctx, rq);
-
-	if (unlikely(is_flush_fua)) {
-		if (q->elevator)
-			goto elv_insert;
-		blk_mq_bio_to_request(rq, bio);
-		blk_insert_flush(rq);
-		goto run_queue;
-	}
-
-	/*
-	 * A task plug currently exists. Since this is completely lockless,
-	 * utilize that to temporarily store requests until the task is
-	 * either done or scheduled away.
-	 */
-	plug = current->plug;
-	if (plug) {
-		struct request *last = NULL;
-
-		blk_mq_bio_to_request(rq, bio);
-
-		/*
-		 * @request_count may become stale because of schedule
-		 * out, so check the list again.
-		 */
-		if (list_empty(&plug->mq_list))
-			request_count = 0;
-		if (!request_count)
-			trace_block_plug(q);
-		else
-			last = list_entry_rq(plug->mq_list.prev);
-
-		blk_mq_put_ctx(data.ctx);
-
-		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
-		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
-			blk_flush_plug_list(plug, false);
-			trace_block_plug(q);
-		}
-
-		list_add_tail(&rq->queuelist, &plug->mq_list);
-		return cookie;
-	}
-
-	if (q->elevator) {
-elv_insert:
-		blk_mq_put_ctx(data.ctx);
-		blk_mq_bio_to_request(rq, bio);
-		blk_mq_sched_insert_request(rq, false, true,
-						!is_sync || is_flush_fua, true);
-		goto done;
-	}
-	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
-		/*
-		 * For a SYNC request, send it to the hardware immediately. For
-		 * an ASYNC request, just ensure that we run it later on. The
-		 * latter allows for merging opportunities and more efficient
-		 * dispatching.
-		 */
-run_queue:
-		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
-	}
-
-	blk_mq_put_ctx(data.ctx);
-done:
-	return cookie;
-}
-
 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		     unsigned int hctx_idx)
 {
@@ -2366,10 +2277,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	INIT_LIST_HEAD(&q->requeue_list);
 	spin_lock_init(&q->requeue_lock);
 
-	if (q->nr_hw_queues > 1)
-		blk_queue_make_request(q, blk_mq_make_request);
-	else
-		blk_queue_make_request(q, blk_sq_make_request);
+	blk_queue_make_request(q, blk_mq_make_request);
 
 	/*
 	 * Do this after blk_queue_make_request() overrides it...
@@ -2717,16 +2625,6 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 	set->nr_hw_queues = nr_hw_queues;
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_realloc_hw_ctxs(set, q);
-
-		/*
-		 * Manually set the make_request_fn as blk_queue_make_request
-		 * resets a lot of the queue settings.
-		 */
-		if (q->nr_hw_queues > 1)
-			q->make_request_fn = blk_mq_make_request;
-		else
-			q->make_request_fn = blk_sq_make_request;
-
 		blk_mq_queue_reinit(q, cpu_online_mask);
 	}
 
-- 
2.11.0


* [PATCH 3/4] blk-mq: improve blk_mq_try_issue_directly
  2017-03-13 15:48 unify and streamline the blk-mq make_request implementations Christoph Hellwig
  2017-03-13 15:48 ` [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE Christoph Hellwig
  2017-03-13 15:48 ` [PATCH 2/4] blk-mq: merge mq and sq make_request instances Christoph Hellwig
@ 2017-03-13 15:48 ` Christoph Hellwig
  2017-03-13 21:02   ` Bart Van Assche
  2017-03-13 15:48 ` [PATCH 4/4] blk-mq: streamline blk_mq_make_request Christoph Hellwig
  3 siblings, 1 reply; 11+ messages in thread
From: Christoph Hellwig @ 2017-03-13 15:48 UTC (permalink / raw)
  To: axboe; +Cc: linux-block

Rename blk_mq_try_issue_directly to __blk_mq_try_issue_directly and add a
new wrapper that takes care of RCU / SRCU locking to avoid having
boilerplate code in the caller, which would get duplicated with new callers.
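
With the wrapper, a caller needs just a single call; the RCU vs SRCU
choice (SRCU for BLK_MQ_F_BLOCKING drivers, whose ->queue_rq may sleep)
is hidden inside it:

	blk_mq_try_issue_directly(data.hctx, rq, &cookie);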

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 32 ++++++++++++++++++--------------
 1 file changed, 18 insertions(+), 14 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 53e49a3f6f0a..48748cb799ed 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1434,7 +1434,7 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
+static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_queue_data bd = {
@@ -1478,13 +1478,27 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
 	blk_mq_sched_insert_request(rq, false, true, true, false);
 }
 
+static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+		struct request *rq, blk_qc_t *cookie)
+{
+	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
+		rcu_read_lock();
+		__blk_mq_try_issue_directly(rq, cookie);
+		rcu_read_unlock();
+	} else {
+		unsigned int srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
+		__blk_mq_try_issue_directly(rq, cookie);
+		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
+	}
+}
+
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = op_is_flush(bio->bi_opf);
 	struct blk_mq_alloc_data data = { .flags = 0 };
 	struct request *rq;
-	unsigned int request_count = 0, srcu_idx;
+	unsigned int request_count = 0;
 	struct blk_plug *plug;
 	struct request *same_queue_rq = NULL;
 	blk_qc_t cookie;
@@ -1582,18 +1596,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		} else /* is_sync */
 			old_rq = rq;
 		blk_mq_put_ctx(data.ctx);
-		if (!old_rq)
-			goto done;
-
-		if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
-			rcu_read_lock();
-			blk_mq_try_issue_directly(old_rq, &cookie);
-			rcu_read_unlock();
-		} else {
-			srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
-			blk_mq_try_issue_directly(old_rq, &cookie);
-			srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
-		}
+		if (old_rq)
+			blk_mq_try_issue_directly(data.hctx, old_rq, &cookie);
 		goto done;
 	}
 
-- 
2.11.0


* [PATCH 4/4] blk-mq: streamline blk_mq_make_request
  2017-03-13 15:48 unify and streamline the blk-mq make_request implementations Christoph Hellwig
                   ` (2 preceding siblings ...)
  2017-03-13 15:48 ` [PATCH 3/4] blk-mq: improve blk_mq_try_issue_directly Christoph Hellwig
@ 2017-03-13 15:48 ` Christoph Hellwig
  2017-03-14 15:40   ` Bart Van Assche
  3 siblings, 1 reply; 11+ messages in thread
From: Christoph Hellwig @ 2017-03-13 15:48 UTC (permalink / raw)
  To: axboe; +Cc: linux-block

Turn the different ways of merging or issuing I/O into a series of if/else
statements instead of the current maze of gotos.  Note that this means we
pin the CPU a little longer for some cases as the CTX put is moved to
common code at the end of the function.
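
The resulting submission path, condensed (branch bodies elided, see the
full diff below):

	plug = current->plug;
	if (unlikely(is_flush_fua)) {
		/* flush/FUA: insert via the scheduler if present, else run queue */
	} else if (plug && q->nr_hw_queues == 1) {
		/* single hw queue: batch onto the per-task plug list */
	} else if (plug && !blk_queue_nomerges(q)) {
		/* limited plugging: keep one request, issue the previous one */
	} else if (is_sync) {
		/* sync: issue directly to the hardware */
	} else if (q->elevator) {
		/* hand off to the I/O scheduler */
	} else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		/* async: run the hardware queue later to allow more merging */
	}
	blk_mq_put_ctx(data.ctx);
	return cookie;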

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 67 +++++++++++++++++++++++-----------------------------------
 1 file changed, 27 insertions(+), 40 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 48748cb799ed..18e449cc832f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1534,16 +1534,17 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	cookie = request_to_qc_t(data.hctx, rq);
 
+	plug = current->plug;
 	if (unlikely(is_flush_fua)) {
-		if (q->elevator)
-			goto elv_insert;
 		blk_mq_bio_to_request(rq, bio);
-		blk_insert_flush(rq);
-		goto run_queue;
-	}
-
-	plug = current->plug;
-	if (plug && q->nr_hw_queues == 1) {
+		if (q->elevator) {
+			blk_mq_sched_insert_request(rq, false, true,
+						!is_sync || is_flush_fua, true);
+		} else {
+			blk_insert_flush(rq);
+			blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
+		}
+	} else if (plug && q->nr_hw_queues == 1) {
 		struct request *last = NULL;
 
 		blk_mq_bio_to_request(rq, bio);
@@ -1562,8 +1563,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		else
 			last = list_entry_rq(plug->mq_list.prev);
 
-		blk_mq_put_ctx(data.ctx);
-
 		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
 		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
 			blk_flush_plug_list(plug, false);
@@ -1571,56 +1570,44 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		}
 
 		list_add_tail(&rq->queuelist, &plug->mq_list);
-		goto done;
-	} else if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
-		struct request *old_rq = NULL;
-
+	} else if (plug && !blk_queue_nomerges(q)) {
 		blk_mq_bio_to_request(rq, bio);
 
 		/*
 		 * We do limited plugging. If the bio can be merged, do that.
 		 * Otherwise the existing request in the plug list will be
 		 * issued. So the plug list will have one request at most
+		 *
+		 * The plug list might get flushed before this. If that happens,
+		 * the plug list is emptry and same_queue_rq is invalid.
 		 */
-		if (plug) {
-			/*
-			 * The plug list might get flushed before this. If that
-			 * happens, same_queue_rq is invalid and plug list is
-			 * empty
-			 */
-			if (same_queue_rq && !list_empty(&plug->mq_list)) {
-				old_rq = same_queue_rq;
-				list_del_init(&old_rq->queuelist);
-			}
-			list_add_tail(&rq->queuelist, &plug->mq_list);
-		} else /* is_sync */
-			old_rq = rq;
-		blk_mq_put_ctx(data.ctx);
-		if (old_rq)
-			blk_mq_try_issue_directly(data.hctx, old_rq, &cookie);
-		goto done;
-	}
+		if (!list_empty(&plug->mq_list))
+			list_del_init(&same_queue_rq->queuelist);
+		else
+			same_queue_rq = NULL;
 
-	if (q->elevator) {
-elv_insert:
-		blk_mq_put_ctx(data.ctx);
+		list_add_tail(&rq->queuelist, &plug->mq_list);
+		if (same_queue_rq)
+			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
+					&cookie);
+	} else if (is_sync) {
+		blk_mq_bio_to_request(rq, bio);
+		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+	} else if (q->elevator) {
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_sched_insert_request(rq, false, true,
 						!is_sync || is_flush_fua, true);
-		goto done;
-	}
-	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+	} else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
 		/*
 		 * For a SYNC request, send it to the hardware immediately. For
 		 * an ASYNC request, just ensure that we run it later on. The
 		 * latter allows for merging opportunities and more efficient
 		 * dispatching.
 		 */
-run_queue:
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}
+
 	blk_mq_put_ctx(data.ctx);
-done:
 	return cookie;
 }
 
-- 
2.11.0


* Re: [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE
  2017-03-13 15:48 ` [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE Christoph Hellwig
@ 2017-03-13 20:52   ` Bart Van Assche
  2017-03-13 23:16     ` hch
  0 siblings, 1 reply; 11+ messages in thread
From: Bart Van Assche @ 2017-03-13 20:52 UTC (permalink / raw)
  To: hch, axboe; +Cc: linux-block

On Mon, 2017-03-13 at 09:48 -0600, Christoph Hellwig wrote:
> This flag was never used since it was introduced.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  block/blk-mq.c         | 8 +-------
>  include/linux/blk-mq.h | 1 -
>  2 files changed, 1 insertion(+), 8 deletions(-)
>
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 159187a28d66..acf0ddf4af52 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -1534,13 +1534,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
>  	}
>
>  	plug = current->plug;
> -	/*
> -	 * If the driver supports defer issued based on 'last', then
> -	 * queue it up like normal since we can potentially save some
> -	 * CPU this way.
> -	 */
> -	if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
> -	    !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
> +	if (((plug && !blk_queue_nomerges(q)) || is_sync)) {

A minor comment: due to this change the outer pair of parentheses
became superfluous. Please consider removing these.
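
That is, the condition would then read:

	if ((plug && !blk_queue_nomerges(q)) || is_sync) {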

Thanks,

Bart.


* Re: [PATCH 2/4] blk-mq: merge mq and sq make_request instances
  2017-03-13 15:48 ` [PATCH 2/4] blk-mq: merge mq and sq make_request instances Christoph Hellwig
@ 2017-03-13 21:01   ` Bart Van Assche
  2017-03-13 23:17     ` hch
  0 siblings, 1 reply; 11+ messages in thread
From: Bart Van Assche @ 2017-03-13 21:01 UTC (permalink / raw)
  To: hch, axboe; +Cc: linux-block

On Mon, 2017-03-13 at 09:48 -0600, Christoph Hellwig wrote:
> @@ -1534,7 +1529,36 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
>  	}
>
>  	plug = current->plug;
> -	if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
> +	if (plug && q->nr_hw_queues == 1) {
> +		struct request *last = NULL;
> +
> +		blk_mq_bio_to_request(rq, bio);
> +
> +		/*
> +		 * @request_count may become stale because of schedule
> +		 * out, so check the list again.
> +		 */

The above comment was relevant as long as there was a request_count
assignment above blk_mq_sched_get_request(). This patch moves that
assignment inside if (plug && q->nr_hw_queues == 1). Does that mean
that the above comment should be removed entirely?

> +		if (list_empty(&plug->mq_list))
> +			request_count = 0;
> +		else if (blk_queue_nomerges(q))
> +			request_count = blk_plug_queued_count(q);
> +
> +		if (!request_count)
> +			trace_block_plug(q);
> +		else
> +			last = list_entry_rq(plug->mq_list.prev);
> +
> +		blk_mq_put_ctx(data.ctx);
> +
> +		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
> +		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
> +			blk_flush_plug_list(plug, false);
> +			trace_block_plug(q);
> +		}
> +
> +		list_add_tail(&rq->queuelist, &plug->mq_list);
> +		goto done;
> +	} else if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
>  		struct request *old_rq = NULL;
>
>  		blk_mq_bio_to_request(rq, bio);

Bart.


* Re: [PATCH 3/4] blk-mq: improve blk_mq_try_issue_directly
  2017-03-13 15:48 ` [PATCH 3/4] blk-mq: improve blk_mq_try_issue_directly Christoph Hellwig
@ 2017-03-13 21:02   ` Bart Van Assche
  0 siblings, 0 replies; 11+ messages in thread
From: Bart Van Assche @ 2017-03-13 21:02 UTC (permalink / raw)
  To: hch, axboe; +Cc: linux-block

On Mon, 2017-03-13 at 09:48 -0600, Christoph Hellwig wrote:
> Rename blk_mq_try_issue_directly to __blk_mq_try_issue_directly and add a
> new wrapper that takes care of RCU / SRCU locking to avoid having
> boilerplate code in the caller, which would get duplicated with new callers.

Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>


* Re: [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE
  2017-03-13 20:52   ` Bart Van Assche
@ 2017-03-13 23:16     ` hch
  0 siblings, 0 replies; 11+ messages in thread
From: hch @ 2017-03-13 23:16 UTC (permalink / raw)
  To: Bart Van Assche; +Cc: hch, axboe, linux-block

On Mon, Mar 13, 2017 at 08:52:54PM +0000, Bart Van Assche wrote:
> > -	if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
> > -	    !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
> > +	if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
> 
> A minor comment: due to this change the outer pair of parentheses
> became superfluous. Please consider removing these.

The last patch in the series removes the statement in this form. But
if I have to respin the series for some reason I'll make sure it
gets removed here as well.


* Re: [PATCH 2/4] blk-mq: merge mq and sq make_request instances
  2017-03-13 21:01   ` Bart Van Assche
@ 2017-03-13 23:17     ` hch
  0 siblings, 0 replies; 11+ messages in thread
From: hch @ 2017-03-13 23:17 UTC (permalink / raw)
  To: Bart Van Assche; +Cc: hch, axboe, linux-block

On Mon, Mar 13, 2017 at 09:01:08PM +0000, Bart Van Assche wrote:
> > +		/*
> > +		 * @request_count may become stale because of schedule
> > +		 * out, so check the list again.
> > +		 */
> 
> The above comment was relevant as long as there was a request_count assignment
> above blk_mq_sched_get_request(). This patch moves that assignment inside if
> (plug && q->nr_hw_queues == 1). Does that mean that the above comment should be
> removed entirely?

I don't think so - for the !blk_queue_nomerges cases we still rely
on blk_attempt_plug_merge calculating request_count, so the comment
still applies.
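
For reference, this is the logic in question, as it looked in the
single-queue variant removed by patch 2 (the merged path keeps an
equivalent):

	if (!is_flush_fua && !blk_queue_nomerges(q)) {
		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
			return BLK_QC_T_NONE;
	} else
		request_count = blk_plug_queued_count(q);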


* Re: [PATCH 4/4] blk-mq: streamline blk_mq_make_request
  2017-03-13 15:48 ` [PATCH 4/4] blk-mq: streamline blk_mq_make_request Christoph Hellwig
@ 2017-03-14 15:40   ` Bart Van Assche
  0 siblings, 0 replies; 11+ messages in thread
From: Bart Van Assche @ 2017-03-14 15:40 UTC (permalink / raw)
  To: hch, axboe; +Cc: linux-block

On Mon, 2017-03-13 at 09:48 -0600, Christoph Hellwig wrote:
> Turn the different ways of merging or issuing I/O into a series of if/else
> statements instead of the current maze of gotos.  Note that this means we
> pin the CPU a little longer for some cases as the CTX put is moved to
> common code at the end of the function.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  block/blk-mq.c | 67 +++++++++++++++++++++++-----------------------------------
>  1 file changed, 27 insertions(+), 40 deletions(-)
>
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 48748cb799ed..18e449cc832f 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -1534,16 +1534,17 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
>
>  	cookie = request_to_qc_t(data.hctx, rq);
>
> +	plug = current->plug;
>  	if (unlikely(is_flush_fua)) {
> -		if (q->elevator)
> -			goto elv_insert;
>  		blk_mq_bio_to_request(rq, bio);
> -		blk_insert_flush(rq);
> -		goto run_queue;
> -	}
> -
> -	plug = current->plug;
> -	if (plug && q->nr_hw_queues == 1) {
> +		if (q->elevator) {
> +			blk_mq_sched_insert_request(rq, false, true,
> +						!is_sync || is_flush_fua, true);
> +		} else {
> +			blk_insert_flush(rq);
> +			blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
> +		}
> +	} else if (plug && q->nr_hw_queues == 1) {
>  		struct request *last = NULL;
>
>  		blk_mq_bio_to_request(rq, bio);

This change introduces the following construct:

if (is_flush_fua) {
	/* use is_flush_fua */
} else ...

Have you considered simplifying the code that uses is_flush_fua further?
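
For instance, inside the if (unlikely(is_flush_fua)) branch the flag is
known to be true, so the async argument could become a plain true:

	blk_mq_sched_insert_request(rq, false, true, true, true);
	/* ... or, without an elevator: */
	blk_mq_run_hw_queue(data.hctx, true);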

> @@ -1562,8 +1563,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
>  		else
>  			last = list_entry_rq(plug->mq_list.prev);
>
> -		blk_mq_put_ctx(data.ctx);
> -
>  		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
>  		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
>  			blk_flush_plug_list(plug, false);
> @@ -1571,56 +1570,44 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
>  		}
>
>  		list_add_tail(&rq->queuelist, &plug->mq_list);
> -		goto done;
> -	} else if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
> -		struct request *old_rq = NULL;
> -
> +	} else if (plug && !blk_queue_nomerges(q)) {
>  		blk_mq_bio_to_request(rq, bio);
>
>  		/*
>  		 * We do limited plugging. If the bio can be merged, do that.
>  		 * Otherwise the existing request in the plug list will be
>  		 * issued. So the plug list will have one request at most
> +		 *
> +		 * The plug list might get flushed before this. If that happens,
> +		 * the plug list is emptry and same_queue_rq is invalid.
>  		 */

"emptry" looks like a typo?

> -		if (plug) {
> -			/*
> -			 * The plug list might get flushed before this. If that
> -			 * happens, same_queue_rq is invalid and plug list is
> -			 * empty
> -			 */
> -			if (same_queue_rq && !list_empty(&plug->mq_list)) {
> -				old_rq = same_queue_rq;
> -				list_del_init(&old_rq->queuelist);
> -			}
> -			list_add_tail(&rq->queuelist, &plug->mq_list);
> -		} else /* is_sync */
> -			old_rq = rq;
> -		blk_mq_put_ctx(data.ctx);
> -		if (old_rq)
> -			blk_mq_try_issue_directly(data.hctx, old_rq, &cookie);
> -		goto done;
> -	}
> +		if (!list_empty(&plug->mq_list))
> +			list_del_init(&same_queue_rq->queuelist);
> +		else
> +			same_queue_rq = NULL;
>
> -	if (q->elevator) {
> -elv_insert:
> -		blk_mq_put_ctx(data.ctx);
> +		list_add_tail(&rq->queuelist, &plug->mq_list);
> +		if (same_queue_rq)
> +			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
> +					&cookie);
> +	} else if (is_sync) {
> +		blk_mq_bio_to_request(rq, bio);
> +		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
> +	} else if (q->elevator) {
>  		blk_mq_bio_to_request(rq, bio);
>  		blk_mq_sched_insert_request(rq, false, true,
>  						!is_sync || is_flush_fua, true);
> -		goto done;
> -	}
> -	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
> +	} else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
>  		/*
>  		 * For a SYNC request, send it to the hardware immediately. For
>  		 * an ASYNC request, just ensure that we run it later on. The
>  		 * latter allows for merging opportunities and more efficient
>  		 * dispatching.
>  		 */
> -run_queue:
>  		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
>  	}

Since this code occurs in the else branch of an if (is_flush_fua)
statement, can "|| is_flush_fua" be left out?
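
In that else branch is_flush_fua is known to be false, so the call would
reduce to:

	blk_mq_run_hw_queue(data.hctx, !is_sync);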

What I noticed while reviewing this patch is that it makes multiple
changes: not only have the goto statements been eliminated, but the
old_rq variable has been eliminated too. I think this patch would be
easier to review if it were split into three patches: one that removes
the old_rq variable, one that eliminates the goto statements, and one
that removes dead code.

Thanks,

Bart.


Thread overview: 11+ messages
2017-03-13 15:48 unify and streamline the blk-mq make_request implementations Christoph Hellwig
2017-03-13 15:48 ` [PATCH 1/4] blk-mq: remove BLK_MQ_F_DEFER_ISSUE Christoph Hellwig
2017-03-13 20:52   ` Bart Van Assche
2017-03-13 23:16     ` hch
2017-03-13 15:48 ` [PATCH 2/4] blk-mq: merge mq and sq make_request instances Christoph Hellwig
2017-03-13 21:01   ` Bart Van Assche
2017-03-13 23:17     ` hch
2017-03-13 15:48 ` [PATCH 3/4] blk-mq: improve blk_mq_try_issue_directly Christoph Hellwig
2017-03-13 21:02   ` Bart Van Assche
2017-03-13 15:48 ` [PATCH 4/4] blk-mq: streamline blk_mq_make_request Christoph Hellwig
2017-03-14 15:40   ` Bart Van Assche
