From: Jens Axboe <axboe@kernel.dk>
To: linux-block@vger.kernel.org
Cc: Jens Axboe <axboe@kernel.dk>, Josef Bacik <josef@toxicpanda.com>
Subject: [PATCH 04/11] blk-rq-qos: inline check for q->rq_qos functions
Date: Tue, 13 Nov 2018 08:42:26 -0700
Message-ID: <20181113154233.15256-5-axboe@kernel.dk>
In-Reply-To: <20181113154233.15256-1-axboe@kernel.dk>

Put the short code inline in the fast path: when no rq_qos functions
are attached to the queue, a call now costs just a NULL check on
q->rq_qos. This minimizes the impact on the hot path in the core code.

Clean up the duplicated code by having a macro set up both the inline
check and the out-of-line functions.
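
For illustration, a minimal standalone sketch of the shape the macros
below expand to (hypothetical names, not the kernel API): the
out-of-line walker assumes the caller has already verified the list is
non-empty, and the static inline wrapper keeps the common case down to
a single pointer test.

#include <stdio.h>

struct hook {
	void (*fn)(struct hook *, int);
	struct hook *next;
};

struct queue {
	struct hook *hooks;	/* NULL in the common case */
};

/* Out-of-line slow path: caller guarantees the list is non-empty. */
void __queue_run(struct hook *h, int arg)
{
	do {
		if (h->fn)
			h->fn(h, arg);
		h = h->next;
	} while (h);
}

/* Inline fast path: a single pointer test when nothing is attached. */
static inline void queue_run(struct queue *q, int arg)
{
	if (q->hooks)
		__queue_run(q->hooks, arg);
}

static void print_hook(struct hook *h, int arg)
{
	(void)h;
	printf("hook ran with arg %d\n", arg);
}

int main(void)
{
	struct hook h = { .fn = print_hook, .next = NULL };
	struct queue empty = { .hooks = NULL };
	struct queue busy = { .hooks = &h };

	queue_run(&empty, 1);	/* no-op beyond the NULL check */
	queue_run(&busy, 2);	/* walks the list, runs the hook */
	return 0;
}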

Cc: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-rq-qos.c | 90 +++++++++++++---------------------------------
 block/blk-rq-qos.h | 35 ++++++++++++++----
 2 files changed, 52 insertions(+), 73 deletions(-)

diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 0005dfd568dd..266c9e111475 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -27,76 +27,34 @@ bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
 	return atomic_inc_below(&rq_wait->inflight, limit);
 }
 
-void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
-{
-	struct rq_qos *rqos;
-
-	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
-		if (rqos->ops->cleanup)
-			rqos->ops->cleanup(rqos, bio);
-	}
-}
-
-void rq_qos_done(struct request_queue *q, struct request *rq)
-{
-	struct rq_qos *rqos;
-
-	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
-		if (rqos->ops->done)
-			rqos->ops->done(rqos, rq);
-	}
-}
-
-void rq_qos_issue(struct request_queue *q, struct request *rq)
-{
-	struct rq_qos *rqos;
-
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
-		if (rqos->ops->issue)
-			rqos->ops->issue(rqos, rq);
-	}
-}
-
-void rq_qos_requeue(struct request_queue *q, struct request *rq)
-{
-	struct rq_qos *rqos;
-
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
-		if (rqos->ops->requeue)
-			rqos->ops->requeue(rqos, rq);
-	}
+#define __RQ_QOS_FUNC_ONE(__OP, type)					\
+void __rq_qos_##__OP(struct rq_qos *rqos, type arg)			\
+{									\
+	do {								\
+		if ((rqos)->ops->__OP)					\
+			(rqos)->ops->__OP((rqos), arg);			\
+		(rqos) = (rqos)->next;					\
+	} while (rqos);							\
 }
 
-void rq_qos_throttle(struct request_queue *q, struct bio *bio,
-		     spinlock_t *lock)
-{
-	struct rq_qos *rqos;
-
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
-		if (rqos->ops->throttle)
-			rqos->ops->throttle(rqos, bio, lock);
-	}
+__RQ_QOS_FUNC_ONE(cleanup, struct bio *);
+__RQ_QOS_FUNC_ONE(done, struct request *);
+__RQ_QOS_FUNC_ONE(issue, struct request *);
+__RQ_QOS_FUNC_ONE(requeue, struct request *);
+__RQ_QOS_FUNC_ONE(done_bio, struct bio *);
+
+#define __RQ_QOS_FUNC_TWO(__OP, type1, type2)				\
+void __rq_qos_##__OP(struct rq_qos *rqos, type1 arg1, type2 arg2)	\
+{									\
+	do {								\
+		if ((rqos)->ops->__OP)					\
+			(rqos)->ops->__OP((rqos), arg1, arg2);		\
+		(rqos) = (rqos)->next;					\
+	} while (rqos);							\
 }
 
-void rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio)
-{
-	struct rq_qos *rqos;
-
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
-		if (rqos->ops->track)
-			rqos->ops->track(rqos, rq, bio);
-	}
-}
-
-void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
-{
-	struct rq_qos *rqos;
-
-	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
-		if (rqos->ops->done_bio)
-			rqos->ops->done_bio(rqos, bio);
-	}
-}
+__RQ_QOS_FUNC_TWO(throttle, struct bio *, spinlock_t *);
+__RQ_QOS_FUNC_TWO(track, struct request *, struct bio *);
 
 /*
  * Return true, if we can't increase the depth further by scaling
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 32b02efbfa66..50558a6ea248 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -98,12 +98,33 @@ void rq_depth_scale_up(struct rq_depth *rqd);
 void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
 bool rq_depth_calc_max_depth(struct rq_depth *rqd);
 
-void rq_qos_cleanup(struct request_queue *, struct bio *);
-void rq_qos_done(struct request_queue *, struct request *);
-void rq_qos_issue(struct request_queue *, struct request *);
-void rq_qos_requeue(struct request_queue *, struct request *);
-void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
-void rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
-void rq_qos_track(struct request_queue *q, struct request *, struct bio *);
+#define RQ_QOS_FUNC_ONE(__OP, type)					\
+void __rq_qos_##__OP(struct rq_qos *rqos, type arg);			\
+static inline void rq_qos_##__OP(struct request_queue *q, type arg)	\
+{									\
+	if ((q)->rq_qos)						\
+		__rq_qos_##__OP((q)->rq_qos, arg);			\
+}
+
+#define RQ_QOS_FUNC_TWO(__OP, type1, type2)				\
+void __rq_qos_##__OP(struct rq_qos *rqos, type1 arg1, type2 arg2);	\
+static inline void rq_qos_##__OP(struct request_queue *q, type1 arg1,	\
+				 type2 arg2)				\
+{									\
+	if ((q)->rq_qos)						\
+		__rq_qos_##__OP((q)->rq_qos, arg1, arg2);		\
+}
+
+RQ_QOS_FUNC_ONE(cleanup, struct bio *);
+RQ_QOS_FUNC_ONE(done, struct request *);
+RQ_QOS_FUNC_ONE(issue, struct request *);
+RQ_QOS_FUNC_ONE(requeue, struct request *);
+RQ_QOS_FUNC_ONE(done_bio, struct bio *);
+RQ_QOS_FUNC_TWO(throttle, struct bio *, spinlock_t *);
+RQ_QOS_FUNC_TWO(track, struct request *, struct bio *);
+#undef RQ_QOS_FUNC_ONE
+#undef RQ_QOS_FUNC_TWO
+
 void rq_qos_exit(struct request_queue *);
+
 #endif
-- 
2.17.1

