* streamline blk-mq I/O scheduler interaction
@ 2017-06-16 16:15 Christoph Hellwig
  2017-06-16 16:15 ` [PATCH 01/10] blk-mq: mark blk_mq_rq_ctx_init static Christoph Hellwig
                   ` (11 more replies)
  0 siblings, 12 replies; 20+ messages in thread
From: Christoph Hellwig @ 2017-06-16 16:15 UTC (permalink / raw)
  To: Jens Axboe, Omar Sandoval, Paolo Valente; +Cc: linux-block

Hi blk-mq I/O scheduler folks,

I've been trying to re-learn our I/O path after the scheduler additions,
and when walking through the code the following optimizations came to
mind.  I've only done basic testing with them, so for now this is
a bit of an RFC.


* [PATCH 01/10] blk-mq: mark blk_mq_rq_ctx_init static
  2017-06-16 16:15 streamline blk-mq I/O scheduler interaction Christoph Hellwig
@ 2017-06-16 16:15 ` Christoph Hellwig
  2017-06-16 16:15 ` [PATCH 02/10] blk-mq: move blk_mq_sched_{get,put}_request to blk-mq.c Christoph Hellwig
                   ` (10 subsequent siblings)
  11 siblings, 0 replies; 20+ messages in thread
From: Christoph Hellwig @ 2017-06-16 16:15 UTC (permalink / raw)
  To: Jens Axboe, Omar Sandoval, Paolo Valente; +Cc: linux-block

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 5 ++---
 block/blk-mq.h | 2 --
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 359d2dc0d414..e1d650804c8e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -204,8 +204,8 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 }
 EXPORT_SYMBOL(blk_mq_can_queue);
 
-void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-			struct request *rq, unsigned int op)
+static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
+		struct request *rq, unsigned int op)
 {
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* csd/requeue_work/fifo_time is initialized before use */
@@ -243,7 +243,6 @@ void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 
 	ctx->rq_dispatched[op_is_sync(op)]++;
 }
-EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);
 
 struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
 				       unsigned int op)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index cc67b48e3551..806fed53f607 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -131,8 +131,6 @@ static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data
 /*
  * Internal helpers for request allocation/init/free
  */
-void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-			struct request *rq, unsigned int op);
 void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 				struct request *rq);
 void blk_mq_finish_request(struct request *rq);
-- 
2.11.0


* [PATCH 02/10] blk-mq: move blk_mq_sched_{get,put}_request to blk-mq.c
  2017-06-16 16:15 streamline blk-mq I/O scheduler interaction Christoph Hellwig
  2017-06-16 16:15 ` [PATCH 01/10] blk-mq: mark blk_mq_rq_ctx_init static Christoph Hellwig
@ 2017-06-16 16:15 ` Christoph Hellwig
  2017-06-16 16:15 ` [PATCH 03/10] blk-mq: remove blk_mq_sched_{get,put}_rq_priv Christoph Hellwig
                   ` (9 subsequent siblings)
  11 siblings, 0 replies; 20+ messages in thread
From: Christoph Hellwig @ 2017-06-16 16:15 UTC (permalink / raw)
  To: Jens Axboe, Omar Sandoval, Paolo Valente; +Cc: linux-block

Having them out of line in blk-mq-sched.c just makes the code flow
unnecessarily complicated.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq-sched.c | 69 ++--------------------------------------------------
 block/blk-mq-sched.h |  4 +--
 block/blk-mq.c       | 67 +++++++++++++++++++++++++++++++++++++++++++++++---
 3 files changed, 67 insertions(+), 73 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index c4e2afb9d12d..62db188595dc 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -58,8 +58,8 @@ static void __blk_mq_sched_assign_ioc(struct request_queue *q,
 	rq->elv.icq = NULL;
 }
 
-static void blk_mq_sched_assign_ioc(struct request_queue *q,
-				    struct request *rq, struct bio *bio)
+void blk_mq_sched_assign_ioc(struct request_queue *q, struct request *rq,
+			     struct bio *bio)
 {
 	struct io_context *ioc;
 
@@ -68,71 +68,6 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
 		__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }
 
-struct request *blk_mq_sched_get_request(struct request_queue *q,
-					 struct bio *bio,
-					 unsigned int op,
-					 struct blk_mq_alloc_data *data)
-{
-	struct elevator_queue *e = q->elevator;
-	struct request *rq;
-
-	blk_queue_enter_live(q);
-	data->q = q;
-	if (likely(!data->ctx))
-		data->ctx = blk_mq_get_ctx(q);
-	if (likely(!data->hctx))
-		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
-
-	if (e) {
-		data->flags |= BLK_MQ_REQ_INTERNAL;
-
-		/*
-		 * Flush requests are special and go directly to the
-		 * dispatch list.
-		 */
-		if (!op_is_flush(op) && e->type->ops.mq.get_request) {
-			rq = e->type->ops.mq.get_request(q, op, data);
-			if (rq)
-				rq->rq_flags |= RQF_QUEUED;
-		} else
-			rq = __blk_mq_alloc_request(data, op);
-	} else {
-		rq = __blk_mq_alloc_request(data, op);
-	}
-
-	if (rq) {
-		if (!op_is_flush(op)) {
-			rq->elv.icq = NULL;
-			if (e && e->type->icq_cache)
-				blk_mq_sched_assign_ioc(q, rq, bio);
-		}
-		data->hctx->queued++;
-		return rq;
-	}
-
-	blk_queue_exit(q);
-	return NULL;
-}
-
-void blk_mq_sched_put_request(struct request *rq)
-{
-	struct request_queue *q = rq->q;
-	struct elevator_queue *e = q->elevator;
-
-	if (rq->rq_flags & RQF_ELVPRIV) {
-		blk_mq_sched_put_rq_priv(rq->q, rq);
-		if (rq->elv.icq) {
-			put_io_context(rq->elv.icq->ioc);
-			rq->elv.icq = NULL;
-		}
-	}
-
-	if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
-		e->type->ops.mq.put_request(rq);
-	else
-		blk_mq_finish_request(rq);
-}
-
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
 	struct request_queue *q = hctx->queue;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index b87e5be5db8c..5d12529538d0 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -7,8 +7,8 @@
 void blk_mq_sched_free_hctx_data(struct request_queue *q,
 				 void (*exit)(struct blk_mq_hw_ctx *));
 
-struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bio, unsigned int op, struct blk_mq_alloc_data *data);
-void blk_mq_sched_put_request(struct request *rq);
+void blk_mq_sched_assign_ioc(struct request_queue *q, struct request *rq,
+			     struct bio *bio);
 
 void blk_mq_sched_request_inserted(struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e1d650804c8e..694cbd698507 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -277,6 +277,51 @@ struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
 }
 EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
 
+static struct request *blk_mq_get_request(struct request_queue *q,
+		struct bio *bio, unsigned int op,
+		struct blk_mq_alloc_data *data)
+{
+	struct elevator_queue *e = q->elevator;
+	struct request *rq;
+
+	blk_queue_enter_live(q);
+	data->q = q;
+	if (likely(!data->ctx))
+		data->ctx = blk_mq_get_ctx(q);
+	if (likely(!data->hctx))
+		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
+
+	if (e) {
+		data->flags |= BLK_MQ_REQ_INTERNAL;
+
+		/*
+		 * Flush requests are special and go directly to the
+		 * dispatch list.
+		 */
+		if (!op_is_flush(op) && e->type->ops.mq.get_request) {
+			rq = e->type->ops.mq.get_request(q, op, data);
+			if (rq)
+				rq->rq_flags |= RQF_QUEUED;
+		} else
+			rq = __blk_mq_alloc_request(data, op);
+	} else {
+		rq = __blk_mq_alloc_request(data, op);
+	}
+
+	if (rq) {
+		if (!op_is_flush(op)) {
+			rq->elv.icq = NULL;
+			if (e && e->type->icq_cache)
+				blk_mq_sched_assign_ioc(q, rq, bio);
+		}
+		data->hctx->queued++;
+		return rq;
+	}
+
+	blk_queue_exit(q);
+	return NULL;
+}
+
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		unsigned int flags)
 {
@@ -288,7 +333,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 	if (ret)
 		return ERR_PTR(ret);
 
-	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
+	rq = blk_mq_get_request(q, NULL, rw, &alloc_data);
 
 	blk_mq_put_ctx(alloc_data.ctx);
 	blk_queue_exit(q);
@@ -339,7 +384,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 	cpu = cpumask_first(alloc_data.hctx->cpumask);
 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
-	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
+	rq = blk_mq_get_request(q, NULL, rw, &alloc_data);
 
 	blk_queue_exit(q);
 
@@ -389,7 +434,21 @@ EXPORT_SYMBOL_GPL(blk_mq_finish_request);
 
 void blk_mq_free_request(struct request *rq)
 {
-	blk_mq_sched_put_request(rq);
+	struct request_queue *q = rq->q;
+	struct elevator_queue *e = q->elevator;
+
+	if (rq->rq_flags & RQF_ELVPRIV) {
+		blk_mq_sched_put_rq_priv(rq->q, rq);
+		if (rq->elv.icq) {
+			put_io_context(rq->elv.icq->ioc);
+			rq->elv.icq = NULL;
+		}
+	}
+
+	if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
+		e->type->ops.mq.put_request(rq);
+	else
+		blk_mq_finish_request(rq);
 }
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
@@ -1494,7 +1553,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	trace_block_getrq(q, bio, bio->bi_opf);
 
-	rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
+	rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
 	if (unlikely(!rq)) {
 		__wbt_done(q->rq_wb, wb_acct);
 		return BLK_QC_T_NONE;
-- 
2.11.0


* [PATCH 03/10] blk-mq: remove blk_mq_sched_{get,put}_rq_priv
  2017-06-16 16:15 streamline blk-mq I/O scheduler interaction Christoph Hellwig
  2017-06-16 16:15 ` [PATCH 01/10] blk-mq: mark blk_mq_rq_ctx_init static Christoph Hellwig
  2017-06-16 16:15 ` [PATCH 02/10] blk-mq: move blk_mq_sched_{get,put}_request to blk-mq.c Christoph Hellwig
@ 2017-06-16 16:15 ` Christoph Hellwig
  2017-06-16 16:15 ` [PATCH 04/10] blk-mq-sched: unify request finished methods Christoph Hellwig
                   ` (8 subsequent siblings)
  11 siblings, 0 replies; 20+ messages in thread
From: Christoph Hellwig @ 2017-06-16 16:15 UTC (permalink / raw)
  To: Jens Axboe, Omar Sandoval, Paolo Valente; +Cc: linux-block

Having these as separate helpers in a header really does not help
readability, or my chances to refactor this code sanely.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq-sched.c | 10 ++++++----
 block/blk-mq-sched.h | 21 ---------------------
 block/blk-mq.c       |  3 ++-
 3 files changed, 8 insertions(+), 26 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 62db188595dc..22601e5c6f19 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -36,6 +36,7 @@ static void __blk_mq_sched_assign_ioc(struct request_queue *q,
 				      struct bio *bio,
 				      struct io_context *ioc)
 {
+	struct elevator_queue *e = q->elevator;
 	struct io_cq *icq;
 
 	spin_lock_irq(q->queue_lock);
@@ -49,13 +50,14 @@ static void __blk_mq_sched_assign_ioc(struct request_queue *q,
 	}
 
 	rq->elv.icq = icq;
-	if (!blk_mq_sched_get_rq_priv(q, rq, bio)) {
-		rq->rq_flags |= RQF_ELVPRIV;
-		get_io_context(icq->ioc);
+	if (e && e->type->ops.mq.get_rq_priv &&
+	    e->type->ops.mq.get_rq_priv(q, rq, bio)) {
+		rq->elv.icq = NULL;
 		return;
 	}
 
-	rq->elv.icq = NULL;
+	rq->rq_flags |= RQF_ELVPRIV;
+	get_io_context(icq->ioc);
 }
 
 void blk_mq_sched_assign_ioc(struct request_queue *q, struct request *rq,
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 5d12529538d0..f34e6a522105 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -44,27 +44,6 @@ blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 	return __blk_mq_sched_bio_merge(q, bio);
 }
 
-static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
-					   struct request *rq,
-					   struct bio *bio)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e && e->type->ops.mq.get_rq_priv)
-		return e->type->ops.mq.get_rq_priv(q, rq, bio);
-
-	return 0;
-}
-
-static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
-					    struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e && e->type->ops.mq.put_rq_priv)
-		e->type->ops.mq.put_rq_priv(q, rq);
-}
-
 static inline bool
 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 			 struct bio *bio)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 694cbd698507..1a45c287db64 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -438,7 +438,8 @@ void blk_mq_free_request(struct request *rq)
 	struct elevator_queue *e = q->elevator;
 
 	if (rq->rq_flags & RQF_ELVPRIV) {
-		blk_mq_sched_put_rq_priv(rq->q, rq);
+		if (e && e->type->ops.mq.put_rq_priv)
+			e->type->ops.mq.put_rq_priv(q, rq);
 		if (rq->elv.icq) {
 			put_io_context(rq->elv.icq->ioc);
 			rq->elv.icq = NULL;
-- 
2.11.0


* [PATCH 04/10] blk-mq-sched: unify request finished methods
  2017-06-16 16:15 streamline blk-mq I/O scheduler interaction Christoph Hellwig
                   ` (2 preceding siblings ...)
  2017-06-16 16:15 ` [PATCH 03/10] blk-mq: remove blk_mq_sched_{get,put}_rq_priv Christoph Hellwig
@ 2017-06-16 16:15 ` Christoph Hellwig
  2017-06-19 13:01   ` Paolo Valente
  2017-06-16 16:15 ` [PATCH 05/10] blk-mq: simplify blk_mq_free_request Christoph Hellwig
                   ` (7 subsequent siblings)
  11 siblings, 1 reply; 20+ messages in thread
From: Christoph Hellwig @ 2017-06-16 16:15 UTC (permalink / raw)
  To: Jens Axboe, Omar Sandoval, Paolo Valente; +Cc: linux-block

No need to have two different callouts for bfq vs kyber.
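
For illustration only (not part of this patch): with this change a
scheduler hooks ->finish_request instead of ->put_request, and must no
longer call blk_mq_finish_request itself - the core now does that
unconditionally in blk_mq_free_request.  A minimal sketch with made-up
names:

	/* hypothetical scheduler, names not from this series */
	static void my_sched_finish_request(struct request *rq)
	{
		/* release only scheduler-private state here ... */
		my_sched_clear_state(rq);	/* made-up helper */
		/* ... and do NOT call blk_mq_finish_request(rq) */
	}

	static struct elevator_type my_sched = {
		.ops.mq = {
			.finish_request	= my_sched_finish_request,
		},
	};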

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/bfq-iosched.c      |  6 +++---
 block/blk-mq.c           | 11 ++++-------
 block/kyber-iosched.c    |  8 +++-----
 include/linux/elevator.h |  3 +--
 4 files changed, 11 insertions(+), 17 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index ed93da2462ab..4f69e39c2f89 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4290,7 +4290,7 @@ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
 	bfq_put_queue(bfqq);
 }
 
-static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+static void bfq_finish_request(struct request *rq)
 {
 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
 	struct bfq_data *bfqd = bfqq->bfqd;
@@ -4324,7 +4324,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
 		 */
 
 		if (!RB_EMPTY_NODE(&rq->rb_node))
-			bfq_remove_request(q, rq);
+			bfq_remove_request(rq->q, rq);
 		bfq_put_rq_priv_body(bfqq);
 	}
 
@@ -4951,7 +4951,7 @@ static struct elv_fs_entry bfq_attrs[] = {
 static struct elevator_type iosched_bfq_mq = {
 	.ops.mq = {
 		.get_rq_priv		= bfq_get_rq_private,
-		.put_rq_priv		= bfq_put_rq_private,
+		.finish_request		= bfq_finish_request,
 		.exit_icq		= bfq_exit_icq,
 		.insert_requests	= bfq_insert_requests,
 		.dispatch_request	= bfq_dispatch_request,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1a45c287db64..9df7e0394a48 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -437,19 +437,16 @@ void blk_mq_free_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 
-	if (rq->rq_flags & RQF_ELVPRIV) {
-		if (e && e->type->ops.mq.put_rq_priv)
-			e->type->ops.mq.put_rq_priv(q, rq);
+	if (rq->rq_flags & (RQF_ELVPRIV | RQF_QUEUED)) {
+		if (e && e->type->ops.mq.finish_request)
+			e->type->ops.mq.finish_request(rq);
 		if (rq->elv.icq) {
 			put_io_context(rq->elv.icq->ioc);
 			rq->elv.icq = NULL;
 		}
 	}
 
-	if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
-		e->type->ops.mq.put_request(rq);
-	else
-		blk_mq_finish_request(rq);
+	blk_mq_finish_request(rq);
 }
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index b9faabc75fdb..2557b399f0a8 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -446,13 +446,11 @@ static struct request *kyber_get_request(struct request_queue *q,
 	return rq;
 }
 
-static void kyber_put_request(struct request *rq)
+static void kyber_finish_request(struct request *rq)
 {
-	struct request_queue *q = rq->q;
-	struct kyber_queue_data *kqd = q->elevator->elevator_data;
+	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
 
 	rq_clear_domain_token(kqd, rq);
-	blk_mq_finish_request(rq);
 }
 
 static void kyber_completed_request(struct request *rq)
@@ -816,7 +814,7 @@ static struct elevator_type kyber_sched = {
 		.init_hctx = kyber_init_hctx,
 		.exit_hctx = kyber_exit_hctx,
 		.get_request = kyber_get_request,
-		.put_request = kyber_put_request,
+		.finish_request = kyber_finish_request,
 		.completed_request = kyber_completed_request,
 		.dispatch_request = kyber_dispatch_request,
 		.has_work = kyber_has_work,
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 0e306c5a86d6..4acea351d43f 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -105,7 +105,7 @@ struct elevator_mq_ops {
 	void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
 	void (*requests_merged)(struct request_queue *, struct request *, struct request *);
 	struct request *(*get_request)(struct request_queue *, unsigned int, struct blk_mq_alloc_data *);
-	void (*put_request)(struct request *);
+	void (*finish_request)(struct request *);
 	void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
 	struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
 	bool (*has_work)(struct blk_mq_hw_ctx *);
@@ -115,7 +115,6 @@ struct elevator_mq_ops {
 	struct request *(*former_request)(struct request_queue *, struct request *);
 	struct request *(*next_request)(struct request_queue *, struct request *);
 	int (*get_rq_priv)(struct request_queue *, struct request *, struct bio *);
-	void (*put_rq_priv)(struct request_queue *, struct request *);
 	void (*init_icq)(struct io_cq *);
 	void (*exit_icq)(struct io_cq *);
 };
-- 
2.11.0


* [PATCH 05/10] blk-mq: simplify blk_mq_free_request
  2017-06-16 16:15 streamline blk-mq I/O scheduler interaction Christoph Hellwig
                   ` (3 preceding siblings ...)
  2017-06-16 16:15 ` [PATCH 04/10] blk-mq-sched: unify request finished methods Christoph Hellwig
@ 2017-06-16 16:15 ` Christoph Hellwig
  2017-06-17 21:50   ` Jens Axboe
  2017-06-16 16:15 ` [PATCH 06/10] blk-mq: streamline blk_mq_get_request Christoph Hellwig
                   ` (6 subsequent siblings)
  11 siblings, 1 reply; 20+ messages in thread
From: Christoph Hellwig @ 2017-06-16 16:15 UTC (permalink / raw)
  To: Jens Axboe, Omar Sandoval, Paolo Valente; +Cc: linux-block

Merge three functions only tail-called by blk_mq_free_request into
blk_mq_free_request.
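
For reference, the call chain being flattened (as reconstructed from
the diff below) was:

	blk_mq_free_request(rq)
	  -> blk_mq_finish_request(rq)
	       -> blk_mq_finish_hctx_request(hctx, rq)
	            -> __blk_mq_finish_request(hctx, ctx, rq)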

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 50 +++++++++++++++-----------------------------------
 block/blk-mq.h |  3 ---
 2 files changed, 15 insertions(+), 38 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9df7e0394a48..0b17351fccfc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -395,12 +395,24 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
-void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			     struct request *rq)
+void blk_mq_free_request(struct request *rq)
 {
-	const int sched_tag = rq->internal_tag;
 	struct request_queue *q = rq->q;
+	struct elevator_queue *e = q->elevator;
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	const int sched_tag = rq->internal_tag;
 
+	if (rq->rq_flags & (RQF_ELVPRIV | RQF_QUEUED)) {
+		if (e && e->type->ops.mq.finish_request)
+			e->type->ops.mq.finish_request(rq);
+		if (rq->elv.icq) {
+			put_io_context(rq->elv.icq->ioc);
+			rq->elv.icq = NULL;
+		}
+	}
+
+	ctx->rq_completed[rq_is_sync(rq)]++;
 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
 
@@ -416,38 +428,6 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	blk_mq_sched_restart(hctx);
 	blk_queue_exit(q);
 }
-
-static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
-				     struct request *rq)
-{
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-
-	ctx->rq_completed[rq_is_sync(rq)]++;
-	__blk_mq_finish_request(hctx, ctx, rq);
-}
-
-void blk_mq_finish_request(struct request *rq)
-{
-	blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
-}
-EXPORT_SYMBOL_GPL(blk_mq_finish_request);
-
-void blk_mq_free_request(struct request *rq)
-{
-	struct request_queue *q = rq->q;
-	struct elevator_queue *e = q->elevator;
-
-	if (rq->rq_flags & (RQF_ELVPRIV | RQF_QUEUED)) {
-		if (e && e->type->ops.mq.finish_request)
-			e->type->ops.mq.finish_request(rq);
-		if (rq->elv.icq) {
-			put_io_context(rq->elv.icq->ioc);
-			rq->elv.icq = NULL;
-		}
-	}
-
-	blk_mq_finish_request(rq);
-}
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 806fed53f607..6a509a8eb3fb 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -131,9 +131,6 @@ static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data
 /*
  * Internal helpers for request allocation/init/free
  */
-void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-				struct request *rq);
-void blk_mq_finish_request(struct request *rq);
 struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
 					unsigned int op);
 
-- 
2.11.0


* [PATCH 06/10] blk-mq: streamline blk_mq_get_request
  2017-06-16 16:15 streamline blk-mq I/O scheduler interaction Christoph Hellwig
                   ` (4 preceding siblings ...)
  2017-06-16 16:15 ` [PATCH 05/10] blk-mq: simplify blk_mq_free_request Christoph Hellwig
@ 2017-06-16 16:15 ` Christoph Hellwig
  2017-06-16 16:15 ` [PATCH 07/10] bfq-iosched: fix NULL ioc check in bfq_get_rq_private Christoph Hellwig
                   ` (5 subsequent siblings)
  11 siblings, 0 replies; 20+ messages in thread
From: Christoph Hellwig @ 2017-06-16 16:15 UTC (permalink / raw)
  To: Jens Axboe, Omar Sandoval, Paolo Valente; +Cc: linux-block

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0b17351fccfc..e056725679a8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -302,24 +302,24 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 			rq = e->type->ops.mq.get_request(q, op, data);
 			if (rq)
 				rq->rq_flags |= RQF_QUEUED;
-		} else
-			rq = __blk_mq_alloc_request(data, op);
-	} else {
-		rq = __blk_mq_alloc_request(data, op);
+			goto allocated;
+		}
 	}
 
-	if (rq) {
-		if (!op_is_flush(op)) {
-			rq->elv.icq = NULL;
-			if (e && e->type->icq_cache)
-				blk_mq_sched_assign_ioc(q, rq, bio);
-		}
-		data->hctx->queued++;
-		return rq;
+	rq = __blk_mq_alloc_request(data, op);
+allocated:
+	if (!rq) {
+		blk_queue_exit(q);
+		return NULL;
 	}
 
-	blk_queue_exit(q);
-	return NULL;
+	if (!op_is_flush(op)) {
+		rq->elv.icq = NULL;
+		if (e && e->type->icq_cache)
+			blk_mq_sched_assign_ioc(q, rq, bio);
+	}
+	data->hctx->queued++;
+	return rq;
 }
 
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
-- 
2.11.0


* [PATCH 07/10] bfq-iosched: fix NULL ioc check in bfq_get_rq_private
  2017-06-16 16:15 streamline blk-mq I/O scheduler interaction Christoph Hellwig
                   ` (5 preceding siblings ...)
  2017-06-16 16:15 ` [PATCH 06/10] blk-mq: streamline blk_mq_get_request Christoph Hellwig
@ 2017-06-16 16:15 ` Christoph Hellwig
  2017-06-19 13:14   ` Paolo Valente
  2017-06-16 16:15 ` [PATCH 08/10] blk-mq: refactor blk_mq_sched_assign_ioc Christoph Hellwig
                   ` (4 subsequent siblings)
  11 siblings, 1 reply; 20+ messages in thread
From: Christoph Hellwig @ 2017-06-16 16:15 UTC (permalink / raw)
  To: Jens Axboe, Omar Sandoval, Paolo Valente; +Cc: linux-block

icq_to_bic is a container_of operation, so we need to check for NULL
before it.  Also move the check outside the spinlock while we're at
it.
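
To illustrate the point (example only, not from the patch):
container_of() subtracts the member offset from the passed-in pointer,
so unless the member happens to sit at offset zero a NULL input yields
a bogus non-NULL pointer, and a NULL check on the result never fires:

	/* mirrors icq_to_bic(), which is
	 * container_of(icq, struct bfq_io_cq, icq) */
	struct io_cq *icq = NULL;
	struct bfq_io_cq *bic = container_of(icq, struct bfq_io_cq, icq);
	/*
	 * bic is (char *)NULL - offsetof(struct bfq_io_cq, icq): it is
	 * NULL only by accident when the member offset is 0, so the
	 * input pointer must be tested before the conversion.
	 */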

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/bfq-iosched.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 4f69e39c2f89..f037b005faa1 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4398,16 +4398,17 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 			      struct bio *bio)
 {
 	struct bfq_data *bfqd = q->elevator->elevator_data;
-	struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
+	struct bfq_io_cq *bic;
 	const int is_sync = rq_is_sync(rq);
 	struct bfq_queue *bfqq;
 	bool new_queue = false;
 	bool split = false;
 
-	spin_lock_irq(&bfqd->lock);
+	if (!rq->elv.icq)
+		return 1;
+	bic = icq_to_bic(rq->elv.icq);
 
-	if (!bic)
-		goto queue_fail;
+	spin_lock_irq(&bfqd->lock);
 
 	bfq_check_ioprio_change(bic, bio);
 
@@ -4465,13 +4466,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 		bfq_handle_burst(bfqd, bfqq);
 
 	spin_unlock_irq(&bfqd->lock);
-
 	return 0;
-
-queue_fail:
-	spin_unlock_irq(&bfqd->lock);
-
-	return 1;
 }
 
 static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
-- 
2.11.0


* [PATCH 08/10] blk-mq: refactor blk_mq_sched_assign_ioc
  2017-06-16 16:15 streamline blk-mq I/O scheduler interaction Christoph Hellwig
                   ` (6 preceding siblings ...)
  2017-06-16 16:15 ` [PATCH 07/10] bfq-iosched: fix NULL ioc check in bfq_get_rq_private Christoph Hellwig
@ 2017-06-16 16:15 ` Christoph Hellwig
  2017-06-16 16:15 ` [PATCH 09/10] blk-mq-sched: unify request prepare methods Christoph Hellwig
                   ` (3 subsequent siblings)
  11 siblings, 0 replies; 20+ messages in thread
From: Christoph Hellwig @ 2017-06-16 16:15 UTC (permalink / raw)
  To: Jens Axboe, Omar Sandoval, Paolo Valente; +Cc: linux-block

blk_mq_sched_assign_ioc now only handles the assignment of the ioc if
the scheduler needs it (bfq only at the moment).  The call to the
per-request initializer is moved out of it so that it can be merged with
a similar call for the kyber I/O scheduler.
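
For orientation, the resulting order of operations in blk_mq_get_request
(taken from the hunk below, with comments added) is:

	if (!op_is_flush(op)) {
		rq->elv.icq = NULL;
		if (e && e->type->ops.mq.get_rq_priv) {
			/* ioc assignment only if the scheduler uses
			 * icqs (bfq) and the bio carries an io_context */
			if (e->type->icq_cache && rq_ioc(bio))
				blk_mq_sched_assign_ioc(rq, bio);

			/* per-request init, now open-coded in the caller */
			if (e->type->ops.mq.get_rq_priv(q, rq, bio)) {
				if (rq->elv.icq)
					put_io_context(rq->elv.icq->ioc);
				rq->elv.icq = NULL;
			} else {
				rq->rq_flags |= RQF_ELVPRIV;
			}
		}
	}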

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq-sched.c | 28 ++++------------------------
 block/blk-mq-sched.h |  3 +--
 block/blk-mq.c       | 14 ++++++++++++--
 3 files changed, 17 insertions(+), 28 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 22601e5c6f19..254d1c164567 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -31,12 +31,10 @@ void blk_mq_sched_free_hctx_data(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);
 
-static void __blk_mq_sched_assign_ioc(struct request_queue *q,
-				      struct request *rq,
-				      struct bio *bio,
-				      struct io_context *ioc)
+void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
 {
-	struct elevator_queue *e = q->elevator;
+	struct request_queue *q = rq->q;
+	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq;
 
 	spin_lock_irq(q->queue_lock);
@@ -48,26 +46,8 @@ static void __blk_mq_sched_assign_ioc(struct request_queue *q,
 		if (!icq)
 			return;
 	}
-
-	rq->elv.icq = icq;
-	if (e && e->type->ops.mq.get_rq_priv &&
-	    e->type->ops.mq.get_rq_priv(q, rq, bio)) {
-		rq->elv.icq = NULL;
-		return;
-	}
-
-	rq->rq_flags |= RQF_ELVPRIV;
 	get_io_context(icq->ioc);
-}
-
-void blk_mq_sched_assign_ioc(struct request_queue *q, struct request *rq,
-			     struct bio *bio)
-{
-	struct io_context *ioc;
-
-	ioc = rq_ioc(bio);
-	if (ioc)
-		__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
+	rq->elv.icq = icq;
 }
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index f34e6a522105..e117edd039b1 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -7,8 +7,7 @@
 void blk_mq_sched_free_hctx_data(struct request_queue *q,
 				 void (*exit)(struct blk_mq_hw_ctx *));
 
-void blk_mq_sched_assign_ioc(struct request_queue *q, struct request *rq,
-			     struct bio *bio);
+void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio);
 
 void blk_mq_sched_request_inserted(struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e056725679a8..2f380ab7a603 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -315,8 +315,18 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 
 	if (!op_is_flush(op)) {
 		rq->elv.icq = NULL;
-		if (e && e->type->icq_cache)
-			blk_mq_sched_assign_ioc(q, rq, bio);
+		if (e && e->type->ops.mq.get_rq_priv) {
+			if (e->type->icq_cache && rq_ioc(bio))
+				blk_mq_sched_assign_ioc(rq, bio);
+
+			if (e->type->ops.mq.get_rq_priv(q, rq, bio)) {
+				if (rq->elv.icq)
+					put_io_context(rq->elv.icq->ioc);
+				rq->elv.icq = NULL;
+			} else {
+				rq->rq_flags |= RQF_ELVPRIV;
+			}
+		}
 	}
 	data->hctx->queued++;
 	return rq;
-- 
2.11.0


* [PATCH 09/10] blk-mq-sched: unify request prepare methods
  2017-06-16 16:15 streamline blk-mq I/O scheduler interaction Christoph Hellwig
                   ` (7 preceding siblings ...)
  2017-06-16 16:15 ` [PATCH 08/10] blk-mq: refactor blk_mq_sched_assign_ioc Christoph Hellwig
@ 2017-06-16 16:15 ` Christoph Hellwig
  2017-06-19 13:32   ` Paolo Valente
  2017-06-16 16:15 ` [PATCH 10/10] blk-mq: remove __blk_mq_alloc_request Christoph Hellwig
                   ` (2 subsequent siblings)
  11 siblings, 1 reply; 20+ messages in thread
From: Christoph Hellwig @ 2017-06-16 16:15 UTC (permalink / raw)
  To: Jens Axboe, Omar Sandoval, Paolo Valente; +Cc: linux-block

This patch makes sure we always allocate requests in the core blk-mq
code and use a common prepare_request method to initialize them for
both mq I/O schedulers.  For Kyber an additional limit_depth method
is added that is called before allocating the request.

Also because none of the initializations can really fail the new method
does not return an error - instead the bfq finish method is hardened
to deal with the no-IOC case.

Last but not least this removes the abuse of RQF_QUEUED by the blk-mq
scheduling code as RQF_ELVPRIV is all that is needed now.
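
As an illustration of the new contract (hypothetical scheduler, names
made up): limit_depth may only tweak the allocation data before the tag
is taken, and prepare_request initializes the request and cannot fail:

	static void my_limit_depth(unsigned int op,
				   struct blk_mq_alloc_data *data)
	{
		/* optionally shrink the usable tag depth */
		if (!op_is_sync(op))
			data->shallow_depth = 16;	/* made-up value */
	}

	static void my_prepare_request(struct request *rq, struct bio *bio)
	{
		/* cannot fail; must tolerate rq->elv.icq == NULL */
		rq->elv.priv[0] = NULL;
	}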

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/bfq-iosched.c      | 19 ++++++++++++-------
 block/blk-mq.c           | 22 ++++++----------------
 block/kyber-iosched.c    | 23 +++++++++++------------
 include/linux/elevator.h |  4 ++--
 4 files changed, 31 insertions(+), 37 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index f037b005faa1..60d32700f104 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4292,8 +4292,14 @@ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
 
 static void bfq_finish_request(struct request *rq)
 {
-	struct bfq_queue *bfqq = RQ_BFQQ(rq);
-	struct bfq_data *bfqd = bfqq->bfqd;
+	struct bfq_queue *bfqq;
+	struct bfq_data *bfqd;
+
+	if (!rq->elv.icq)
+		return;
+
+	bfqq = RQ_BFQQ(rq);
+	bfqd = bfqq->bfqd;
 
 	if (rq->rq_flags & RQF_STARTED)
 		bfqg_stats_update_completion(bfqq_group(bfqq),
@@ -4394,9 +4400,9 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
 /*
  * Allocate bfq data structures associated with this request.
  */
-static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
-			      struct bio *bio)
+static void bfq_prepare_request(struct request *rq, struct bio *bio)
 {
+	struct request_queue *q = rq->q;
 	struct bfq_data *bfqd = q->elevator->elevator_data;
 	struct bfq_io_cq *bic;
 	const int is_sync = rq_is_sync(rq);
@@ -4405,7 +4411,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 	bool split = false;
 
 	if (!rq->elv.icq)
-		return 1;
+		return;
 	bic = icq_to_bic(rq->elv.icq);
 
 	spin_lock_irq(&bfqd->lock);
@@ -4466,7 +4472,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 		bfq_handle_burst(bfqd, bfqq);
 
 	spin_unlock_irq(&bfqd->lock);
-	return 0;
 }
 
 static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
@@ -4945,7 +4950,7 @@ static struct elv_fs_entry bfq_attrs[] = {
 
 static struct elevator_type iosched_bfq_mq = {
 	.ops.mq = {
-		.get_rq_priv		= bfq_get_rq_private,
+		.prepare_request	= bfq_prepare_request,
 		.finish_request		= bfq_finish_request,
 		.exit_icq		= bfq_exit_icq,
 		.insert_requests	= bfq_insert_requests,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2f380ab7a603..81d05c19d4b3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -298,16 +298,11 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 		 * Flush requests are special and go directly to the
 		 * dispatch list.
 		 */
-		if (!op_is_flush(op) && e->type->ops.mq.get_request) {
-			rq = e->type->ops.mq.get_request(q, op, data);
-			if (rq)
-				rq->rq_flags |= RQF_QUEUED;
-			goto allocated;
-		}
+		if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
+			e->type->ops.mq.limit_depth(op, data);
 	}
 
 	rq = __blk_mq_alloc_request(data, op);
-allocated:
 	if (!rq) {
 		blk_queue_exit(q);
 		return NULL;
@@ -315,17 +310,12 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 
 	if (!op_is_flush(op)) {
 		rq->elv.icq = NULL;
-		if (e && e->type->ops.mq.get_rq_priv) {
+		if (e && e->type->ops.mq.prepare_request) {
 			if (e->type->icq_cache && rq_ioc(bio))
 				blk_mq_sched_assign_ioc(rq, bio);
 
-			if (e->type->ops.mq.get_rq_priv(q, rq, bio)) {
-				if (rq->elv.icq)
-					put_io_context(rq->elv.icq->ioc);
-				rq->elv.icq = NULL;
-			} else {
-				rq->rq_flags |= RQF_ELVPRIV;
-			}
+			e->type->ops.mq.prepare_request(rq, bio);
+			rq->rq_flags |= RQF_ELVPRIV;
 		}
 	}
 	data->hctx->queued++;
@@ -413,7 +403,7 @@ void blk_mq_free_request(struct request *rq)
 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 	const int sched_tag = rq->internal_tag;
 
-	if (rq->rq_flags & (RQF_ELVPRIV | RQF_QUEUED)) {
+	if (rq->rq_flags & RQF_ELVPRIV) {
 		if (e && e->type->ops.mq.finish_request)
 			e->type->ops.mq.finish_request(rq);
 		if (rq->elv.icq) {
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 2557b399f0a8..a9f6fd3fab8e 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -426,24 +426,22 @@ static void rq_clear_domain_token(struct kyber_queue_data *kqd,
 	}
 }
 
-static struct request *kyber_get_request(struct request_queue *q,
-					 unsigned int op,
-					 struct blk_mq_alloc_data *data)
+static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
 {
-	struct kyber_queue_data *kqd = q->elevator->elevator_data;
-	struct request *rq;
-
 	/*
 	 * We use the scheduler tags as per-hardware queue queueing tokens.
 	 * Async requests can be limited at this stage.
 	 */
-	if (!op_is_sync(op))
+	if (!op_is_sync(op)) {
+		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
+
 		data->shallow_depth = kqd->async_depth;
+	}
+}
 
-	rq = __blk_mq_alloc_request(data, op);
-	if (rq)
-		rq_set_domain_token(rq, -1);
-	return rq;
+static void kyber_prepare_request(struct request *rq, struct bio *bio)
+{
+	rq_set_domain_token(rq, -1);
 }
 
 static void kyber_finish_request(struct request *rq)
@@ -813,7 +811,8 @@ static struct elevator_type kyber_sched = {
 		.exit_sched = kyber_exit_sched,
 		.init_hctx = kyber_init_hctx,
 		.exit_hctx = kyber_exit_hctx,
-		.get_request = kyber_get_request,
+		.limit_depth = kyber_limit_depth,
+		.prepare_request = kyber_prepare_request,
 		.finish_request = kyber_finish_request,
 		.completed_request = kyber_completed_request,
 		.dispatch_request = kyber_dispatch_request,
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 4acea351d43f..5bc8f8682a3e 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -104,7 +104,8 @@ struct elevator_mq_ops {
 	int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
 	void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
 	void (*requests_merged)(struct request_queue *, struct request *, struct request *);
-	struct request *(*get_request)(struct request_queue *, unsigned int, struct blk_mq_alloc_data *);
+	void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
+	void (*prepare_request)(struct request *, struct bio *bio);
 	void (*finish_request)(struct request *);
 	void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
 	struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
@@ -114,7 +115,6 @@ struct elevator_mq_ops {
 	void (*requeue_request)(struct request *);
 	struct request *(*former_request)(struct request_queue *, struct request *);
 	struct request *(*next_request)(struct request_queue *, struct request *);
-	int (*get_rq_priv)(struct request_queue *, struct request *, struct bio *);
 	void (*init_icq)(struct io_cq *);
 	void (*exit_icq)(struct io_cq *);
 };
-- 
2.11.0


* [PATCH 10/10] blk-mq: remove __blk_mq_alloc_request
  2017-06-16 16:15 streamline blk-mq I/O scheduler interaction Christoph Hellwig
                   ` (8 preceding siblings ...)
  2017-06-16 16:15 ` [PATCH 09/10] blk-mq-sched: unify request prepare methods Christoph Hellwig
@ 2017-06-16 16:15 ` Christoph Hellwig
  2017-06-16 20:30 ` streamline blk-mq I/O scheduler interaction Jens Axboe
  2017-06-18 18:35 ` Jens Axboe
  11 siblings, 0 replies; 20+ messages in thread
From: Christoph Hellwig @ 2017-06-16 16:15 UTC (permalink / raw)
  To: Jens Axboe, Omar Sandoval, Paolo Valente; +Cc: linux-block

Move most code into blk_mq_rq_ctx_init, and the rest into
blk_mq_get_request.
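
Condensed from the hunks below, the final allocation sequence in
blk_mq_get_request becomes:

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL) {
		blk_queue_exit(q);
		return NULL;
	}

	rq = blk_mq_rq_ctx_init(data, tag, op);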

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 68 +++++++++++++++++++++++-----------------------------------
 block/blk-mq.h |  6 ------
 2 files changed, 27 insertions(+), 47 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 81d05c19d4b3..be40c1d6e3a4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -204,15 +204,31 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 }
 EXPORT_SYMBOL(blk_mq_can_queue);
 
-static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-		struct request *rq, unsigned int op)
+static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
+		unsigned int tag, unsigned int op)
 {
+	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
+	struct request *rq = tags->static_rqs[tag];
+
+	if (data->flags & BLK_MQ_REQ_INTERNAL) {
+		rq->tag = -1;
+		rq->internal_tag = tag;
+	} else {
+		if (blk_mq_tag_busy(data->hctx)) {
+			rq->rq_flags = RQF_MQ_INFLIGHT;
+			atomic_inc(&data->hctx->nr_active);
+		}
+		rq->tag = tag;
+		rq->internal_tag = -1;
+		data->hctx->tags->rqs[rq->tag] = rq;
+	}
+
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* csd/requeue_work/fifo_time is initialized before use */
-	rq->q = q;
-	rq->mq_ctx = ctx;
+	rq->q = data->q;
+	rq->mq_ctx = data->ctx;
 	rq->cmd_flags = op;
-	if (blk_queue_io_stat(q))
+	if (blk_queue_io_stat(data->q))
 		rq->rq_flags |= RQF_IO_STAT;
 	/* do not touch atomic flags, it needs atomic ops against the timer */
 	rq->cpu = -1;
@@ -241,41 +257,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->end_io_data = NULL;
 	rq->next_rq = NULL;
 
-	ctx->rq_dispatched[op_is_sync(op)]++;
-}
-
-struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
-				       unsigned int op)
-{
-	struct request *rq;
-	unsigned int tag;
-
-	tag = blk_mq_get_tag(data);
-	if (tag != BLK_MQ_TAG_FAIL) {
-		struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
-
-		rq = tags->static_rqs[tag];
-
-		if (data->flags & BLK_MQ_REQ_INTERNAL) {
-			rq->tag = -1;
-			rq->internal_tag = tag;
-		} else {
-			if (blk_mq_tag_busy(data->hctx)) {
-				rq->rq_flags = RQF_MQ_INFLIGHT;
-				atomic_inc(&data->hctx->nr_active);
-			}
-			rq->tag = tag;
-			rq->internal_tag = -1;
-			data->hctx->tags->rqs[rq->tag] = rq;
-		}
-
-		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
-		return rq;
-	}
-
-	return NULL;
+	data->ctx->rq_dispatched[op_is_sync(op)]++;
+	return rq;
 }
-EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
 
 static struct request *blk_mq_get_request(struct request_queue *q,
 		struct bio *bio, unsigned int op,
@@ -283,6 +267,7 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 {
 	struct elevator_queue *e = q->elevator;
 	struct request *rq;
+	unsigned int tag;
 
 	blk_queue_enter_live(q);
 	data->q = q;
@@ -302,12 +287,13 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 			e->type->ops.mq.limit_depth(op, data);
 	}
 
-	rq = __blk_mq_alloc_request(data, op);
-	if (!rq) {
+	tag = blk_mq_get_tag(data);
+	if (tag == BLK_MQ_TAG_FAIL) {
 		blk_queue_exit(q);
 		return NULL;
 	}
 
+	rq = blk_mq_rq_ctx_init(data, tag, op);
 	if (!op_is_flush(op)) {
 		rq->elv.icq = NULL;
 		if (e && e->type->ops.mq.prepare_request) {
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 6a509a8eb3fb..1a06fdf9fd4d 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -128,12 +128,6 @@ static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data
 	return data->hctx->tags;
 }
 
-/*
- * Internal helpers for request allocation/init/free
- */
-struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
-					unsigned int op);
-
 static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
 {
 	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
-- 
2.11.0


* Re: streamline blk-mq I/O scheduler interaction
  2017-06-16 16:15 streamline blk-mq I/O scheduler interaction Christoph Hellwig
                   ` (9 preceding siblings ...)
  2017-06-16 16:15 ` [PATCH 10/10] blk-mq: remove __blk_mq_alloc_request Christoph Hellwig
@ 2017-06-16 20:30 ` Jens Axboe
  2017-06-18 18:35 ` Jens Axboe
  11 siblings, 0 replies; 20+ messages in thread
From: Jens Axboe @ 2017-06-16 20:30 UTC (permalink / raw)
  To: Christoph Hellwig, Omar Sandoval, Paolo Valente; +Cc: linux-block

On 06/16/2017 10:15 AM, Christoph Hellwig wrote:
> Hi blk-mq I/O scheduler folks,
> 
> I've been trying to re-learn our I/O path after the scheduler additions,
> and when walking through the code the following optimizations came to
> mind.  I've only done basic testing with them, so for now this is
> a bit of an RFC.

Most of these look straightforward and are good cleanups. I'll throw
some testing at it, too.

-- 
Jens Axboe


* Re: [PATCH 05/10] blk-mq: simplify blk_mq_free_request
  2017-06-16 16:15 ` [PATCH 05/10] blk-mq: simplify blk_mq_free_request Christoph Hellwig
@ 2017-06-17 21:50   ` Jens Axboe
  2017-06-18  7:24     ` Christoph Hellwig
  0 siblings, 1 reply; 20+ messages in thread
From: Jens Axboe @ 2017-06-17 21:50 UTC (permalink / raw)
  To: Christoph Hellwig, Omar Sandoval, Paolo Valente; +Cc: linux-block

On 06/16/2017 10:15 AM, Christoph Hellwig wrote:
> Merge three functions only tail-called by blk_mq_free_request into
> blk_mq_free_request.

This doesn't compile, as blk_mq_merge_queue_io() has a call to
__blk_mq_finish_request().


-- 
Jens Axboe


* Re: [PATCH 05/10] blk-mq: simplify blk_mq_free_request
  2017-06-17 21:50   ` Jens Axboe
@ 2017-06-18  7:24     ` Christoph Hellwig
  2017-06-18 15:05       ` Jens Axboe
  0 siblings, 1 reply; 20+ messages in thread
From: Christoph Hellwig @ 2017-06-18  7:24 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Christoph Hellwig, Omar Sandoval, Paolo Valente, linux-block

On Sat, Jun 17, 2017 at 03:50:40PM -0600, Jens Axboe wrote:
> On 06/16/2017 10:15 AM, Christoph Hellwig wrote:
> > Merge three functions only tail-called by blk_mq_free_request into
> > blk_mq_free_request.
> 
> This doesn't compile, as blk_mq_merge_queue_io() has a call to
> __blk_mq_finish_request().

Hmm.  The tree I've based my work on certainly doesn't.  But it doesn't
even have blk_mq_merge_queue_io at all, and neither does your
for-4.13/block tree.

What am I missing?


* Re: [PATCH 05/10] blk-mq: simplify blk_mq_free_request
  2017-06-18  7:24     ` Christoph Hellwig
@ 2017-06-18 15:05       ` Jens Axboe
  0 siblings, 0 replies; 20+ messages in thread
From: Jens Axboe @ 2017-06-18 15:05 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Omar Sandoval, Paolo Valente, linux-block

On 06/18/2017 01:24 AM, Christoph Hellwig wrote:
> On Sat, Jun 17, 2017 at 03:50:40PM -0600, Jens Axboe wrote:
>> On 06/16/2017 10:15 AM, Christoph Hellwig wrote:
>>> Merge three functions only tail-called by blk_mq_free_request into
>>> blk_mq_free_request.
>>
>> This doesn't compile, as blk_mq_merge_queue_io() has a call to
>> __blk_mq_finish_request().
> 
> Hmm.  The tree I've based my work on certainly doesn't.  But it doesn't
> even have blk_mq_merge_queue_io at all, and neither does your
> for-4.13/block tree.
> 
> What am I missing?

I tossed it into master for testing by mistake. It's fine against
for-4.13/block, my fault.

I ran it through the regular testing and no issues uncovered so far.

-- 
Jens Axboe


* Re: streamline blk-mq I/O scheduler interaction
  2017-06-16 16:15 streamline blk-mq I/O scheduler interaction Christoph Hellwig
                   ` (10 preceding siblings ...)
  2017-06-16 20:30 ` streamline blk-mq I/O scheduler interaction Jens Axboe
@ 2017-06-18 18:35 ` Jens Axboe
  11 siblings, 0 replies; 20+ messages in thread
From: Jens Axboe @ 2017-06-18 18:35 UTC (permalink / raw)
  To: Christoph Hellwig, Omar Sandoval, Paolo Valente; +Cc: linux-block

On 06/16/2017 10:15 AM, Christoph Hellwig wrote:
> Hi blk-mq I/O scheduler folks,
> 
> I've been trying to re-learn our I/O path after the scheduler additions,
> and when walking through the code the following optimizations came to
> mind.  I've only done basic testing with them, so for now this is
> a bit of an RFC.

Added for 4.13, thanks.

-- 
Jens Axboe


* Re: [PATCH 04/10] blk-mq-sched: unify request finished methods
  2017-06-16 16:15 ` [PATCH 04/10] blk-mq-sched: unify request finished methods Christoph Hellwig
@ 2017-06-19 13:01   ` Paolo Valente
  0 siblings, 0 replies; 20+ messages in thread
From: Paolo Valente @ 2017-06-19 13:01 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Jens Axboe, Omar Sandoval, linux-block


> On 16 Jun 2017, at 18:15, Christoph Hellwig <hch@lst.de> wrote:
> 
> No need to have two different callouts for bfq vs kyber.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> block/bfq-iosched.c      |  6 +++---
> block/blk-mq.c           | 11 ++++-------
> block/kyber-iosched.c    |  8 +++-----
> include/linux/elevator.h |  3 +--
> 4 files changed, 11 insertions(+), 17 deletions(-)
> 
> diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
> index ed93da2462ab..4f69e39c2f89 100644
> --- a/block/bfq-iosched.c
> +++ b/block/bfq-iosched.c
> @@ -4290,7 +4290,7 @@ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
> 	bfq_put_queue(bfqq);
> }
> 
> -static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
> +static void bfq_finish_request(struct request *rq)
> {
> 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
> 	struct bfq_data *bfqd = bfqq->bfqd;
> @@ -4324,7 +4324,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
> 		 */
> 
> 		if (!RB_EMPTY_NODE(&rq->rb_node))
> -			bfq_remove_request(q, rq);
> +			bfq_remove_request(rq->q, rq);
> 		bfq_put_rq_priv_body(bfqq);
> 	}
> 
> @@ -4951,7 +4951,7 @@ static struct elv_fs_entry bfq_attrs[] = {
> static struct elevator_type iosched_bfq_mq = {
> 	.ops.mq = {
> 		.get_rq_priv		= bfq_get_rq_private,
> -		.put_rq_priv		= bfq_put_rq_private,
> +		.finish_request		= bfq_finish_request,
> 		.exit_icq		= bfq_exit_icq,
> 		.insert_requests	= bfq_insert_requests,
> 		.dispatch_request	= bfq_dispatch_request,
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 1a45c287db64..9df7e0394a48 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -437,19 +437,16 @@ void blk_mq_free_request(struct request *rq)
> 	struct request_queue *q = rq->q;
> 	struct elevator_queue *e = q->elevator;
> 
> -	if (rq->rq_flags & RQF_ELVPRIV) {
> -		if (e && e->type->ops.mq.put_rq_priv)
> -			e->type->ops.mq.put_rq_priv(q, rq);
> +	if (rq->rq_flags & (RQF_ELVPRIV | RQF_QUEUED)) {
> +		if (e && e->type->ops.mq.finish_request)
> +			e->type->ops.mq.finish_request(rq);
> 		if (rq->elv.icq) {
> 			put_io_context(rq->elv.icq->ioc);
> 			rq->elv.icq = NULL;
> 		}
> 	}
> 
> -	if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
> -		e->type->ops.mq.put_request(rq);
> -	else
> -		blk_mq_finish_request(rq);
> +	blk_mq_finish_request(rq);
> }

My only concern is that this last change is, and will remain,
functionally equivalent to the previous version of the code (the
choice between put_request and finish_request).  In fact, bfq doesn't
use put_request at all, and the former bfq_put_rq_private assumes that
rq->elv.priv is still properly set.  If this change preserves, as it
seems to me, this invariant, then, for the part related to bfq,

Acked-by: Paolo Valente <paolo.valente@linaro.org>


> EXPORT_SYMBOL_GPL(blk_mq_free_request);
> 
> diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
> index b9faabc75fdb..2557b399f0a8 100644
> --- a/block/kyber-iosched.c
> +++ b/block/kyber-iosched.c
> @@ -446,13 +446,11 @@ static struct request *kyber_get_request(struct request_queue *q,
> 	return rq;
> }
> 
> -static void kyber_put_request(struct request *rq)
> +static void kyber_finish_request(struct request *rq)
> {
> -	struct request_queue *q = rq->q;
> -	struct kyber_queue_data *kqd = q->elevator->elevator_data;
> +	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
> 
> 	rq_clear_domain_token(kqd, rq);
> -	blk_mq_finish_request(rq);
> }
> 
> static void kyber_completed_request(struct request *rq)
> @@ -816,7 +814,7 @@ static struct elevator_type kyber_sched = {
> 		.init_hctx = kyber_init_hctx,
> 		.exit_hctx = kyber_exit_hctx,
> 		.get_request = kyber_get_request,
> -		.put_request = kyber_put_request,
> +		.finish_request = kyber_finish_request,
> 		.completed_request = kyber_completed_request,
> 		.dispatch_request = kyber_dispatch_request,
> 		.has_work = kyber_has_work,
> diff --git a/include/linux/elevator.h b/include/linux/elevator.h
> index 0e306c5a86d6..4acea351d43f 100644
> --- a/include/linux/elevator.h
> +++ b/include/linux/elevator.h
> @@ -105,7 +105,7 @@ struct elevator_mq_ops {
> 	void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
> 	void (*requests_merged)(struct request_queue *, struct request *, struct request *);
> 	struct request *(*get_request)(struct request_queue *, unsigned int, struct blk_mq_alloc_data *);
> -	void (*put_request)(struct request *);
> +	void (*finish_request)(struct request *);
> 	void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
> 	struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
> 	bool (*has_work)(struct blk_mq_hw_ctx *);
> @@ -115,7 +115,6 @@ struct elevator_mq_ops {
> 	struct request *(*former_request)(struct request_queue *, struct request *);
> 	struct request *(*next_request)(struct request_queue *, struct request *);
> 	int (*get_rq_priv)(struct request_queue *, struct request *, struct bio *);
> -	void (*put_rq_priv)(struct request_queue *, struct request *);
> 	void (*init_icq)(struct io_cq *);
> 	void (*exit_icq)(struct io_cq *);
> };
> -- 
> 2.11.0
> 


* Re: [PATCH 07/10] bfq-iosched: fix NULL ioc check in bfq_get_rq_private
  2017-06-16 16:15 ` [PATCH 07/10] bfq-iosched: fix NULL ioc check in bfq_get_rq_private Christoph Hellwig
@ 2017-06-19 13:14   ` Paolo Valente
  0 siblings, 0 replies; 20+ messages in thread
From: Paolo Valente @ 2017-06-19 13:14 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Jens Axboe, Omar Sandoval, linux-block


> On 16 Jun 2017, at 18:15, Christoph Hellwig <hch@lst.de> wrote:
> 
> icq_to_bic is a container_of operation, so we need to check for NULL
> before it.  Also move the check outside the spinlock while we're at
> it.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> block/bfq-iosched.c | 15 +++++----------
> 1 file changed, 5 insertions(+), 10 deletions(-)
> 
> diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
> index 4f69e39c2f89..f037b005faa1 100644
> --- a/block/bfq-iosched.c
> +++ b/block/bfq-iosched.c
> @@ -4398,16 +4398,17 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
> 			      struct bio *bio)
> {
> 	struct bfq_data *bfqd = q->elevator->elevator_data;
> -	struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
> +	struct bfq_io_cq *bic;
> 	const int is_sync = rq_is_sync(rq);
> 	struct bfq_queue *bfqq;
> 	bool new_queue = false;
> 	bool split = false;
> 
> -	spin_lock_irq(&bfqd->lock);

Thanks for taking a step I feared to take here (basically out of cowardice) ...

> +	if (!rq->elv.icq)
> +		return 1;
> +	bic = icq_to_bic(rq->elv.icq);
> 
> -	if (!bic)
> -		goto queue_fail;
> +	spin_lock_irq(&bfqd->lock);
> 
> 	bfq_check_ioprio_change(bic, bio);
> 
> @@ -4465,13 +4466,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
> 		bfq_handle_burst(bfqd, bfqq);
> 
> 	spin_unlock_irq(&bfqd->lock);
> -
> 	return 0;
> -
> -queue_fail:
> -	spin_unlock_irq(&bfqd->lock);
> -
> -	return 1;
> }
> 
> static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
> -- 
> 2.11.0
> 

Acked-by: Paolo Valente <paolo.valente@linaro.org>


* Re: [PATCH 09/10] blk-mq-sched: unify request prepare methods
  2017-06-16 16:15 ` [PATCH 09/10] blk-mq-sched: unify request prepare methods Christoph Hellwig
@ 2017-06-19 13:32   ` Paolo Valente
  2017-06-20  9:10     ` Christoph Hellwig
  0 siblings, 1 reply; 20+ messages in thread
From: Paolo Valente @ 2017-06-19 13:32 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Jens Axboe, Omar Sandoval, linux-block


> Il giorno 16 giu 2017, alle ore 18:15, Christoph Hellwig <hch@lst.de> =
ha scritto:
>=20
> This patch makes sure we always allocate requests in the core blk-mq
> code and use a common prepare_request method to initialize them for
> both mq I/O schedulers.  For Kyber and additional limit_depth method
> is added that is called before allocating the request.
>=20
> Also because none of the intializations can really fail the new method
> does not return an error - instead the bfq finish method is hardened
> to deal with the no-IOC case.
>=20
> Last but not least this removes the abuse of RQF_QUEUE by the blk-mq
> scheduling code as RQF_ELFPRIV is all that is needed now.
>=20
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> block/bfq-iosched.c      | 19 ++++++++++++-------
> block/blk-mq.c           | 22 ++++++----------------
> block/kyber-iosched.c    | 23 +++++++++++------------
> include/linux/elevator.h |  4 ++--
> 4 files changed, 31 insertions(+), 37 deletions(-)
>=20
> diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
> index f037b005faa1..60d32700f104 100644
> --- a/block/bfq-iosched.c
> +++ b/block/bfq-iosched.c
> @@ -4292,8 +4292,14 @@ static void bfq_put_rq_priv_body(struct =
bfq_queue *bfqq)
>=20
> static void bfq_finish_request(struct request *rq)
> {
> -	struct bfq_queue *bfqq =3D RQ_BFQQ(rq);
> -	struct bfq_data *bfqd =3D bfqq->bfqd;
> +	struct bfq_queue *bfqq;
> +	struct bfq_data *bfqd;
> +
> +	if (!rq->elv.icq)
> +		return;
> +

If this is a rq dispatched from a bfqq (or even a request still in the
scheduler), then just exiting here will break bfq state seriously.
However, I guess that this case can never occur.

If this is a rq dispatched from the bfqd->dispatch list, then exiting
here should only unbalance the bfqd->rq_in_driver counter.

I guess that in both cases the missing reset of the contents of
rq->elv.priv (if the function returns here) would not cause any
problems.
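For reference, the pairing at stake looks roughly like this (bfq
internals heavily simplified into a hypothetical "example_" sketch; in
the real code the decrement happens deeper in the completion path):

static struct request *example_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
	struct request *rq;

	/* ... select rq ... */
	bfqd->rq_in_driver++;		/* counted when handed to the driver */
	return rq;
}

static void example_finish_request(struct request *rq)
{
	struct bfq_data *bfqd = rq->q->elevator->elevator_data;

	if (!rq->elv.icq)
		return;			/* the decrement below is skipped */

	bfqd->rq_in_driver--;		/* counter drifts if we bailed out */
}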

If I can be of further help in some way, just let me know.

Thanks,
Paolo

> +	bfqq = RQ_BFQQ(rq);
> +	bfqd = bfqq->bfqd;
>
> 	if (rq->rq_flags & RQF_STARTED)
> 		bfqg_stats_update_completion(bfqq_group(bfqq),
> @@ -4394,9 +4400,9 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
> /*
>  * Allocate bfq data structures associated with this request.
>  */
> -static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
> -			      struct bio *bio)
> +static void bfq_prepare_request(struct request *rq, struct bio *bio)
> {
> +	struct request_queue *q = rq->q;
> 	struct bfq_data *bfqd = q->elevator->elevator_data;
> 	struct bfq_io_cq *bic;
> 	const int is_sync = rq_is_sync(rq);
> @@ -4405,7 +4411,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
> 	bool split = false;
>
> 	if (!rq->elv.icq)
> -		return 1;
> +		return;
> 	bic = icq_to_bic(rq->elv.icq);
>
> 	spin_lock_irq(&bfqd->lock);
> @@ -4466,7 +4472,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
> 		bfq_handle_burst(bfqd, bfqq);
>
> 	spin_unlock_irq(&bfqd->lock);
> -	return 0;
> }
>
> static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
> @@ -4945,7 +4950,7 @@ static struct elv_fs_entry bfq_attrs[] = {
>
> static struct elevator_type iosched_bfq_mq = {
> 	.ops.mq = {
> -		.get_rq_priv		= bfq_get_rq_private,
> +		.prepare_request	= bfq_prepare_request,
> 		.finish_request		= bfq_finish_request,
> 		.exit_icq		= bfq_exit_icq,
> 		.insert_requests	= bfq_insert_requests,
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 2f380ab7a603..81d05c19d4b3 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -298,16 +298,11 @@ static struct request *blk_mq_get_request(struct request_queue *q,
> 		 * Flush requests are special and go directly to the
> 		 * dispatch list.
> 		 */
> -		if (!op_is_flush(op) && e->type->ops.mq.get_request) {
> -			rq = e->type->ops.mq.get_request(q, op, data);
> -			if (rq)
> -				rq->rq_flags |= RQF_QUEUED;
> -			goto allocated;
> -		}
> +		if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
> +			e->type->ops.mq.limit_depth(op, data);
> 	}
>
> 	rq = __blk_mq_alloc_request(data, op);
> -allocated:
> 	if (!rq) {
> 		blk_queue_exit(q);
> 		return NULL;
> @@ -315,17 +310,12 @@ static struct request *blk_mq_get_request(struct request_queue *q,
>
> 	if (!op_is_flush(op)) {
> 		rq->elv.icq = NULL;
> -		if (e && e->type->ops.mq.get_rq_priv) {
> +		if (e && e->type->ops.mq.prepare_request) {
> 			if (e->type->icq_cache && rq_ioc(bio))
> 				blk_mq_sched_assign_ioc(rq, bio);
>
> -			if (e->type->ops.mq.get_rq_priv(q, rq, bio)) {
> -				if (rq->elv.icq)
> -					put_io_context(rq->elv.icq->ioc);
> -				rq->elv.icq = NULL;
> -			} else {
> -				rq->rq_flags |= RQF_ELVPRIV;
> -			}
> +			e->type->ops.mq.prepare_request(rq, bio);
> +			rq->rq_flags |= RQF_ELVPRIV;
> 		}
> 	}
> 	data->hctx->queued++;
> @@ -413,7 +403,7 @@ void blk_mq_free_request(struct request *rq)
> 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
> 	const int sched_tag = rq->internal_tag;
>
> -	if (rq->rq_flags & (RQF_ELVPRIV | RQF_QUEUED)) {
> +	if (rq->rq_flags & RQF_ELVPRIV) {
> 		if (e && e->type->ops.mq.finish_request)
> 			e->type->ops.mq.finish_request(rq);
> 		if (rq->elv.icq) {
> diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
> index 2557b399f0a8..a9f6fd3fab8e 100644
> --- a/block/kyber-iosched.c
> +++ b/block/kyber-iosched.c
> @@ -426,24 +426,22 @@ static void rq_clear_domain_token(struct kyber_queue_data *kqd,
> 	}
> }
>
> -static struct request *kyber_get_request(struct request_queue *q,
> -					 unsigned int op,
> -					 struct blk_mq_alloc_data *data)
> +static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
> {
> -	struct kyber_queue_data *kqd = q->elevator->elevator_data;
> -	struct request *rq;
> -
> 	/*
> 	 * We use the scheduler tags as per-hardware queue queueing tokens.
> 	 * Async requests can be limited at this stage.
> 	 */
> -	if (!op_is_sync(op))
> +	if (!op_is_sync(op)) {
> +		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
>
> 		data->shallow_depth = kqd->async_depth;
> +	}
> +}
>
> -	rq = __blk_mq_alloc_request(data, op);
> -	if (rq)
> -		rq_set_domain_token(rq, -1);
> -	return rq;
> +static void kyber_prepare_request(struct request *rq, struct bio *bio)
> +{
> +	rq_set_domain_token(rq, -1);
> }
>
> static void kyber_finish_request(struct request *rq)
> @@ -813,7 +811,8 @@ static struct elevator_type kyber_sched = {
> 		.exit_sched = kyber_exit_sched,
> 		.init_hctx = kyber_init_hctx,
> 		.exit_hctx = kyber_exit_hctx,
> -		.get_request = kyber_get_request,
> +		.limit_depth = kyber_limit_depth,
> +		.prepare_request = kyber_prepare_request,
> 		.finish_request = kyber_finish_request,
> 		.completed_request = kyber_completed_request,
> 		.dispatch_request = kyber_dispatch_request,
> diff --git a/include/linux/elevator.h b/include/linux/elevator.h
> index 4acea351d43f..5bc8f8682a3e 100644
> --- a/include/linux/elevator.h
> +++ b/include/linux/elevator.h
> @@ -104,7 +104,8 @@ struct elevator_mq_ops {
> 	int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
> 	void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
> 	void (*requests_merged)(struct request_queue *, struct request *, struct request *);
> -	struct request *(*get_request)(struct request_queue *, unsigned int, struct blk_mq_alloc_data *);
> +	void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
> +	void (*prepare_request)(struct request *, struct bio *bio);
> 	void (*finish_request)(struct request *);
> 	void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
> 	struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
> @@ -114,7 +115,6 @@ struct elevator_mq_ops {
> 	void (*requeue_request)(struct request *);
> 	struct request *(*former_request)(struct request_queue *, struct request *);
> 	struct request *(*next_request)(struct request_queue *, struct request *);
> -	int (*get_rq_priv)(struct request_queue *, struct request *, struct bio *);
> 	void (*init_icq)(struct io_cq *);
> 	void (*exit_icq)(struct io_cq *);
> };
> --
> 2.11.0
>

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 09/10] blk-mq-sched: unify request prepare methods
  2017-06-19 13:32   ` Paolo Valente
@ 2017-06-20  9:10     ` Christoph Hellwig
  0 siblings, 0 replies; 20+ messages in thread
From: Christoph Hellwig @ 2017-06-20  9:10 UTC (permalink / raw)
  To: Paolo Valente; +Cc: Christoph Hellwig, Jens Axboe, Omar Sandoval, linux-block

On Mon, Jun 19, 2017 at 03:32:09PM +0200, Paolo Valente wrote:
> > static void bfq_finish_request(struct request *rq)
> > {
> > -	struct bfq_queue *bfqq = RQ_BFQQ(rq);
> > -	struct bfq_data *bfqd = bfqq->bfqd;
> > +	struct bfq_queue *bfqq;
> > +	struct bfq_data *bfqd;
> > +
> > +	if (!rq->elv.icq)
> > +		return;
> > +
> 
> If this is a rq dispatched from a bfqq (or even a request still in the
> scheduler), then just exiting here will break bfq state seriously.
> However, I guess that this case can never occur.

It is a request for which we didn't manage to allocate the ioc.

Previously those wouldn't have RQF_ELVPRIV set, and thus we wouldn't
call into the finish method.  Now they do, and the finish method needs
to handle them just like the init method.
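In other words, any finish method whose setup can bail out early now
needs the same guard on both sides (a sketch with hypothetical
"example_" names; bfq's actual version is in the patch above):

static void example_prepare_request(struct request *rq, struct bio *bio)
{
	if (!rq->elv.icq)
		return;			/* no ioc: set up nothing */

	/* ... set up per-request scheduler state ... */
}

static void example_finish_request(struct request *rq)
{
	if (!rq->elv.icq)
		return;			/* no ioc: nothing to tear down */

	/* ... tear down per-request scheduler state ... */
}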

^ permalink raw reply	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2017-06-20  9:10 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-06-16 16:15 streamline blk-mq I/O scheduler interaction Christoph Hellwig
2017-06-16 16:15 ` [PATCH 01/10] blk-mq: mark blk_mq_rq_ctx_init static Christoph Hellwig
2017-06-16 16:15 ` [PATCH 02/10] blk-mq: move blk_mq_sched_{get,put}_request to blk-mq.c Christoph Hellwig
2017-06-16 16:15 ` [PATCH 03/10] blk-mq: remove blk_mq_sched_{get,put}_rq_priv Christoph Hellwig
2017-06-16 16:15 ` [PATCH 04/10] blk-mq-sched: unify request finished methods Christoph Hellwig
2017-06-19 13:01   ` Paolo Valente
2017-06-16 16:15 ` [PATCH 05/10] blk-mq: simplify blk_mq_free_request Christoph Hellwig
2017-06-17 21:50   ` Jens Axboe
2017-06-18  7:24     ` Christoph Hellwig
2017-06-18 15:05       ` Jens Axboe
2017-06-16 16:15 ` [PATCH 06/10] blk-mq: streamline blk_mq_get_request Christoph Hellwig
2017-06-16 16:15 ` [PATCH 07/10] bfq-iosched: fix NULL ioc check in bfq_get_rq_private Christoph Hellwig
2017-06-19 13:14   ` Paolo Valente
2017-06-16 16:15 ` [PATCH 08/10] blk-mq: refactor blk_mq_sched_assign_ioc Christoph Hellwig
2017-06-16 16:15 ` [PATCH 09/10] blk-mq-sched: unify request prepare methods Christoph Hellwig
2017-06-19 13:32   ` Paolo Valente
2017-06-20  9:10     ` Christoph Hellwig
2017-06-16 16:15 ` [PATCH 10/10] blk-mq: remove __blk_mq_alloc_request Christoph Hellwig
2017-06-16 20:30 ` streamline blk-mq I/O scheduler interaction Jens Axboe
2017-06-18 18:35 ` Jens Axboe
