linux-block.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] block, bfq: remove batches of confusing ifdefs
@ 2017-11-28  9:37 Paolo Valente
  2017-11-28 10:39 ` Ulf Hansson
  2017-11-30 21:21 ` Jens Axboe
  0 siblings, 2 replies; 6+ messages in thread
From: Paolo Valente @ 2017-11-28  9:37 UTC (permalink / raw)
  To: Jens Axboe
  Cc: linux-block, linux-kernel, ulf.hansson, broonie, linus.walleij,
	lucmiccio, bfq-iosched, Paolo Valente

Commit a33801e8b473 ("block, bfq: move debug blkio stats behind
CONFIG_DEBUG_BLK_CGROUP") introduced two batches of confusing ifdefs:
one reported in [1], plus a similar one in another function. This
commit removes both batches, in the way suggested in [1].

[1] https://www.spinics.net/lists/linux-block/msg20043.html

Fixes: a33801e8b473 ("block, bfq: move debug blkio stats behind CONFIG_DEBUG_BLK_CGROUP")
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Tested-by: Luca Miccio <lucmiccio@gmail.com>
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
---
 block/bfq-iosched.c | 127 +++++++++++++++++++++++++++++-----------------------
 1 file changed, 72 insertions(+), 55 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index bcb6d21..0153fea 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -3689,35 +3689,16 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	return rq;
 }
 
-static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
-{
-	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
-	struct request *rq;
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
-	struct bfq_queue *in_serv_queue, *bfqq;
-	bool waiting_rq, idle_timer_disabled;
-#endif
-
-	spin_lock_irq(&bfqd->lock);
-
 #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
-	in_serv_queue = bfqd->in_service_queue;
-	waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
-
-	rq = __bfq_dispatch_request(hctx);
-
-	idle_timer_disabled =
-		waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
-
-#else
-	rq = __bfq_dispatch_request(hctx);
-#endif
-	spin_unlock_irq(&bfqd->lock);
+static inline void bfq_update_dispatch_stats(struct request *rq,
+					     spinlock_t *queue_lock,
+					     struct bfq_queue *in_serv_queue,
+					     bool idle_timer_disabled)
+{
+	struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
 
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
-	bfqq = rq ? RQ_BFQQ(rq) : NULL;
 	if (!idle_timer_disabled && !bfqq)
-		return rq;
+		return;
 
 	/*
 	 * rq and bfqq are guaranteed to exist until this function
@@ -3732,7 +3713,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	 * In addition, the following queue lock guarantees that
 	 * bfqq_group(bfqq) exists as well.
 	 */
-	spin_lock_irq(hctx->queue->queue_lock);
+	spin_lock_irq(queue_lock);
 	if (idle_timer_disabled)
 		/*
 		 * Since the idle timer has been disabled,
@@ -3751,9 +3732,37 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 		bfqg_stats_set_start_empty_time(bfqg);
 		bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
 	}
-	spin_unlock_irq(hctx->queue->queue_lock);
+	spin_unlock_irq(queue_lock);
+}
+#else
+static inline void bfq_update_dispatch_stats(struct request *rq,
+					     spinlock_t *queue_lock,
+					     struct bfq_queue *in_serv_queue,
+					     bool idle_timer_disabled) {}
 #endif
 
+static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+{
+	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+	struct request *rq;
+	struct bfq_queue *in_serv_queue;
+	bool waiting_rq, idle_timer_disabled;
+
+	spin_lock_irq(&bfqd->lock);
+
+	in_serv_queue = bfqd->in_service_queue;
+	waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
+
+	rq = __bfq_dispatch_request(hctx);
+
+	idle_timer_disabled =
+		waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
+
+	spin_unlock_irq(&bfqd->lock);
+
+	bfq_update_dispatch_stats(rq, hctx->queue->queue_lock, in_serv_queue,
+				  idle_timer_disabled);
+
 	return rq;
 }
 
@@ -4276,16 +4285,46 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
 	return idle_timer_disabled;
 }
 
+#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+static inline void bfq_update_insert_stats(struct bfq_queue *bfqq,
+					   spinlock_t *queue_lock,
+					   bool idle_timer_disabled,
+					   unsigned int cmd_flags)
+{
+	if (!bfqq)
+		return;
+
+	/*
+	 * bfqq still exists, because it can disappear only after
+	 * either it is merged with another queue, or the process it
+	 * is associated with exits. But both actions must be taken by
+	 * the same process currently executing this flow of
+	 * instructions.
+	 *
+	 * In addition, the following queue lock guarantees that
+	 * bfqq_group(bfqq) exists as well.
+	 */
+	spin_lock_irq(queue_lock);
+	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
+	if (idle_timer_disabled)
+		bfqg_stats_update_idle_time(bfqq_group(bfqq));
+	spin_unlock_irq(queue_lock);
+}
+#else
+static inline void bfq_update_insert_stats(struct bfq_queue *bfqq,
+					   spinlock_t *queue_lock,
+					   bool idle_timer_disabled,
+					   unsigned int cmd_flags) {}
+#endif
+
 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 			       bool at_head)
 {
 	struct request_queue *q = hctx->queue;
 	struct bfq_data *bfqd = q->elevator->elevator_data;
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
 	bool idle_timer_disabled = false;
 	unsigned int cmd_flags;
-#endif
 
 	spin_lock_irq(&bfqd->lock);
 	if (blk_mq_sched_try_insert_merge(q, rq)) {
@@ -4304,7 +4343,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		else
 			list_add_tail(&rq->queuelist, &bfqd->dispatch);
 	} else {
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 		idle_timer_disabled = __bfq_insert_request(bfqd, rq);
 		/*
 		 * Update bfqq, because, if a queue merge has occurred
@@ -4312,9 +4350,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		 * redirected into a new queue.
 		 */
 		bfqq = RQ_BFQQ(rq);
-#else
-		__bfq_insert_request(bfqd, rq);
-#endif
 
 		if (rq_mergeable(rq)) {
 			elv_rqhash_add(q, rq);
@@ -4323,35 +4358,17 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		}
 	}
 
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 	/*
 	 * Cache cmd_flags before releasing scheduler lock, because rq
 	 * may disappear afterwards (for example, because of a request
 	 * merge).
 	 */
 	cmd_flags = rq->cmd_flags;
-#endif
+
 	spin_unlock_irq(&bfqd->lock);
 
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
-	if (!bfqq)
-		return;
-	/*
-	 * bfqq still exists, because it can disappear only after
-	 * either it is merged with another queue, or the process it
-	 * is associated with exits. But both actions must be taken by
-	 * the same process currently executing this flow of
-	 * instruction.
-	 *
-	 * In addition, the following queue lock guarantees that
-	 * bfqq_group(bfqq) exists as well.
-	 */
-	spin_lock_irq(q->queue_lock);
-	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
-	if (idle_timer_disabled)
-		bfqg_stats_update_idle_time(bfqq_group(bfqq));
-	spin_unlock_irq(q->queue_lock);
-#endif
+	bfq_update_insert_stats(bfqq, q->queue_lock, idle_timer_disabled,
+				cmd_flags);
 }
 
 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
-- 
2.10.0

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH] block, bfq: remove batches of confusing ifdefs
  2017-11-28  9:37 [PATCH] block, bfq: remove batches of confusing ifdefs Paolo Valente
@ 2017-11-28 10:39 ` Ulf Hansson
  2017-11-30 21:21 ` Jens Axboe
  1 sibling, 0 replies; 6+ messages in thread
From: Ulf Hansson @ 2017-11-28 10:39 UTC (permalink / raw)
  To: Paolo Valente
  Cc: Jens Axboe, linux-block, linux-kernel, Mark Brown, Linus Walleij,
	Luca Miccio, bfq-iosched

On 28 November 2017 at 10:37, Paolo Valente <paolo.valente@linaro.org> wrote:
> Commit a33801e8b473 ("block, bfq: move debug blkio stats behind
> CONFIG_DEBUG_BLK_CGROUP") introduced two batches of confusing ifdefs:
> one reported in [1], plus a similar one in another function. This
> commit removes both batches, in the way suggested in [1].
>
> [1] https://www.spinics.net/lists/linux-block/msg20043.html
>
> Fixes: a33801e8b473 ("block, bfq: move debug blkio stats behind CONFIG_DEBUG_BLK_CGROUP")
> Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
> Tested-by: Luca Miccio <lucmiccio@gmail.com>
> Signed-off-by: Paolo Valente <paolo.valente@linaro.org>

This is a good step of improvement (one may consider more steps...),
so feel free to add:

Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>

Kind regards
Uffe

> ---
>  block/bfq-iosched.c | 127 +++++++++++++++++++++++++++++-----------------------
>  1 file changed, 72 insertions(+), 55 deletions(-)
>
> diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
> index bcb6d21..0153fea 100644
> --- a/block/bfq-iosched.c
> +++ b/block/bfq-iosched.c
> @@ -3689,35 +3689,16 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
>         return rq;
>  }
>
> -static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
> -{
> -       struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
> -       struct request *rq;
> -#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
> -       struct bfq_queue *in_serv_queue, *bfqq;
> -       bool waiting_rq, idle_timer_disabled;
> -#endif
> -
> -       spin_lock_irq(&bfqd->lock);
> -
>  #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
> -       in_serv_queue = bfqd->in_service_queue;
> -       waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
> -
> -       rq = __bfq_dispatch_request(hctx);
> -
> -       idle_timer_disabled =
> -               waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
> -
> -#else
> -       rq = __bfq_dispatch_request(hctx);
> -#endif
> -       spin_unlock_irq(&bfqd->lock);
> +static inline void bfq_update_dispatch_stats(struct request *rq,
> +                                            spinlock_t *queue_lock,
> +                                            struct bfq_queue *in_serv_queue,
> +                                            bool idle_timer_disabled)
> +{
> +       struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
>
> -#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
> -       bfqq = rq ? RQ_BFQQ(rq) : NULL;
>         if (!idle_timer_disabled && !bfqq)
> -               return rq;
> +               return;
>
>         /*
>          * rq and bfqq are guaranteed to exist until this function
> @@ -3732,7 +3713,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
>          * In addition, the following queue lock guarantees that
>          * bfqq_group(bfqq) exists as well.
>          */
> -       spin_lock_irq(hctx->queue->queue_lock);
> +       spin_lock_irq(queue_lock);
>         if (idle_timer_disabled)
>                 /*
>                  * Since the idle timer has been disabled,
> @@ -3751,9 +3732,37 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
>                 bfqg_stats_set_start_empty_time(bfqg);
>                 bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
>         }
> -       spin_unlock_irq(hctx->queue->queue_lock);
> +       spin_unlock_irq(queue_lock);
> +}
> +#else
> +static inline void bfq_update_dispatch_stats(struct request *rq,
> +                                            spinlock_t *queue_lock,
> +                                            struct bfq_queue *in_serv_queue,
> +                                            bool idle_timer_disabled) {}
>  #endif
>
> +static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
> +{
> +       struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
> +       struct request *rq;
> +       struct bfq_queue *in_serv_queue;
> +       bool waiting_rq, idle_timer_disabled;
> +
> +       spin_lock_irq(&bfqd->lock);
> +
> +       in_serv_queue = bfqd->in_service_queue;
> +       waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
> +
> +       rq = __bfq_dispatch_request(hctx);
> +
> +       idle_timer_disabled =
> +               waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
> +
> +       spin_unlock_irq(&bfqd->lock);
> +
> +       bfq_update_dispatch_stats(rq, hctx->queue->queue_lock, in_serv_queue,
> +                                 idle_timer_disabled);
> +
>         return rq;
>  }
>
> @@ -4276,16 +4285,46 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
>         return idle_timer_disabled;
>  }
>
> +#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
> +static inline void bfq_update_insert_stats(struct bfq_queue *bfqq,
> +                                          spinlock_t *queue_lock,
> +                                          bool idle_timer_disabled,
> +                                          unsigned int cmd_flags)
> +{
> +       if (!bfqq)
> +               return;
> +
> +       /*
> +        * bfqq still exists, because it can disappear only after
> +        * either it is merged with another queue, or the process it
> +        * is associated with exits. But both actions must be taken by
> +        * the same process currently executing this flow of
> +        * instructions.
> +        *
> +        * In addition, the following queue lock guarantees that
> +        * bfqq_group(bfqq) exists as well.
> +        */
> +       spin_lock_irq(queue_lock);
> +       bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
> +       if (idle_timer_disabled)
> +               bfqg_stats_update_idle_time(bfqq_group(bfqq));
> +       spin_unlock_irq(queue_lock);
> +}
> +#else
> +static inline void bfq_update_insert_stats(struct bfq_queue *bfqq,
> +                                          spinlock_t *queue_lock,
> +                                          bool idle_timer_disabled,
> +                                          unsigned int cmd_flags) {}
> +#endif
> +
>  static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
>                                bool at_head)
>  {
>         struct request_queue *q = hctx->queue;
>         struct bfq_data *bfqd = q->elevator->elevator_data;
> -#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
>         struct bfq_queue *bfqq = RQ_BFQQ(rq);
>         bool idle_timer_disabled = false;
>         unsigned int cmd_flags;
> -#endif
>
>         spin_lock_irq(&bfqd->lock);
>         if (blk_mq_sched_try_insert_merge(q, rq)) {
> @@ -4304,7 +4343,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
>                 else
>                         list_add_tail(&rq->queuelist, &bfqd->dispatch);
>         } else {
> -#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
>                 idle_timer_disabled = __bfq_insert_request(bfqd, rq);
>                 /*
>                  * Update bfqq, because, if a queue merge has occurred
> @@ -4312,9 +4350,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
>                  * redirected into a new queue.
>                  */
>                 bfqq = RQ_BFQQ(rq);
> -#else
> -               __bfq_insert_request(bfqd, rq);
> -#endif
>
>                 if (rq_mergeable(rq)) {
>                         elv_rqhash_add(q, rq);
> @@ -4323,35 +4358,17 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
>                 }
>         }
>
> -#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
>         /*
>          * Cache cmd_flags before releasing scheduler lock, because rq
>          * may disappear afterwards (for example, because of a request
>          * merge).
>          */
>         cmd_flags = rq->cmd_flags;
> -#endif
> +
>         spin_unlock_irq(&bfqd->lock);
>
> -#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
> -       if (!bfqq)
> -               return;
> -       /*
> -        * bfqq still exists, because it can disappear only after
> -        * either it is merged with another queue, or the process it
> -        * is associated with exits. But both actions must be taken by
> -        * the same process currently executing this flow of
> -        * instruction.
> -        *
> -        * In addition, the following queue lock guarantees that
> -        * bfqq_group(bfqq) exists as well.
> -        */
> -       spin_lock_irq(q->queue_lock);
> -       bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
> -       if (idle_timer_disabled)
> -               bfqg_stats_update_idle_time(bfqq_group(bfqq));
> -       spin_unlock_irq(q->queue_lock);
> -#endif
> +       bfq_update_insert_stats(bfqq, q->queue_lock, idle_timer_disabled,
> +                               cmd_flags);
>  }
>
>  static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
> --
> 2.10.0
>

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] block, bfq: remove batches of confusing ifdefs
  2017-11-28  9:37 [PATCH] block, bfq: remove batches of confusing ifdefs Paolo Valente
  2017-11-28 10:39 ` Ulf Hansson
@ 2017-11-30 21:21 ` Jens Axboe
  2017-12-02 10:04   ` Paolo Valente
  1 sibling, 1 reply; 6+ messages in thread
From: Jens Axboe @ 2017-11-30 21:21 UTC (permalink / raw)
  To: Paolo Valente
  Cc: linux-block, linux-kernel, ulf.hansson, broonie, linus.walleij,
	lucmiccio, bfq-iosched

On 11/28/2017 02:37 AM, Paolo Valente wrote:
> Commit a33801e8b473 ("block, bfq: move debug blkio stats behind
> CONFIG_DEBUG_BLK_CGROUP") introduced two batches of confusing ifdefs:
> one reported in [1], plus a similar one in another function. This
> commit removes both batches, in the way suggested in [1].

Some comments below.

> +static inline void bfq_update_dispatch_stats(struct request *rq,
> +					     spinlock_t *queue_lock,
> +					     struct bfq_queue *in_serv_queue,
> +					     bool idle_timer_disabled)
> +{

Don't pass in the queue lock. The normal convention is to pass in the
queue, thus making this:

static void bfq_update_dispatch_stats(struct request_queue *q,
				      struct request *rq,
				      struct bfq_queue *in_serv_queue,
				      bool idle_timer_disabled)

which also gets rid of the inline. In general, never inline anything.
The compiler should figure it out for you. This function is way too big
to inline, plus the cost of what it's doing completely dwarfs function
call overhead.


>
> +	struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
>  
> -#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
> -	bfqq = rq ? RQ_BFQQ(rq) : NULL;
>  	if (!idle_timer_disabled && !bfqq)
> -		return rq;
> +		return;
>  
>  	/*
>  	 * rq and bfqq are guaranteed to exist until this function
> @@ -3732,7 +3713,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
>  	 * In addition, the following queue lock guarantees that
>  	 * bfqq_group(bfqq) exists as well.
>  	 */
> -	spin_lock_irq(hctx->queue->queue_lock);
> +	spin_lock_irq(queue_lock);
>  	if (idle_timer_disabled)
>  		/*
>  		 * Since the idle timer has been disabled,
> @@ -3751,9 +3732,37 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
>  		bfqg_stats_set_start_empty_time(bfqg);
>  		bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
>  	}
> -	spin_unlock_irq(hctx->queue->queue_lock);
> +	spin_unlock_irq(queue_lock);
> +}
> +#else
> +static inline void bfq_update_dispatch_stats(struct request *rq,
> +					     spinlock_t *queue_lock,
> +					     struct bfq_queue *in_serv_queue,
> +					     bool idle_timer_disabled) {}
>  #endif
>  
> +static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
> +{
> +	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
> +	struct request *rq;
> +	struct bfq_queue *in_serv_queue;
> +	bool waiting_rq, idle_timer_disabled;
> +
> +	spin_lock_irq(&bfqd->lock);
> +
> +	in_serv_queue = bfqd->in_service_queue;
> +	waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
> +
> +	rq = __bfq_dispatch_request(hctx);
> +
> +	idle_timer_disabled =
> +		waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
> +
> +	spin_unlock_irq(&bfqd->lock);
> +
> +	bfq_update_dispatch_stats(rq, hctx->queue->queue_lock, in_serv_queue,
> +				  idle_timer_disabled);
> +
>  	return rq;
>  }
>  
> @@ -4276,16 +4285,46 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
>  	return idle_timer_disabled;
>  }
>  
> +#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
> +static inline void bfq_update_insert_stats(struct bfq_queue *bfqq,
> +					   spinlock_t *queue_lock,
> +					   bool idle_timer_disabled,
> +					   unsigned int cmd_flags)
> +{
> +	if (!bfqq)
> +		return;
> +
> +	/*
> +	 * bfqq still exists, because it can disappear only after
> +	 * either it is merged with another queue, or the process it
> +	 * is associated with exits. But both actions must be taken by
> +	 * the same process currently executing this flow of
> +	 * instructions.
> +	 *
> +	 * In addition, the following queue lock guarantees that
> +	 * bfqq_group(bfqq) exists as well.
> +	 */
> +	spin_lock_irq(queue_lock);
> +	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
> +	if (idle_timer_disabled)
> +		bfqg_stats_update_idle_time(bfqq_group(bfqq));
> +	spin_unlock_irq(queue_lock);
> +}
> +#else
> +static inline void bfq_update_insert_stats(struct bfq_queue *bfqq,
> +					   spinlock_t *queue_lock,
> +					   bool idle_timer_disabled,
> +					   unsigned int cmd_flags) {}
> +#endif

Ditto here, kill the inlines.

In general though, good improvement.
 

-- 
Jens Axboe

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] block, bfq: remove batches of confusing ifdefs
  2017-11-30 21:21 ` Jens Axboe
@ 2017-12-02 10:04   ` Paolo Valente
  2017-12-02 16:06     ` Jens Axboe
  0 siblings, 1 reply; 6+ messages in thread
From: Paolo Valente @ 2017-12-02 10:04 UTC (permalink / raw)
  To: Jens Axboe
  Cc: linux-block, Linux Kernel Mailing List, Ulf Hansson, broonie,
	linus.walleij, lucmiccio, bfq-iosched


> Il giorno 30 nov 2017, alle ore 22:21, Jens Axboe <axboe@kernel.dk> ha scritto:
>=20
> On 11/28/2017 02:37 AM, Paolo Valente wrote:
>> Commit a33801e8b473 ("block, bfq: move debug blkio stats behind
>> CONFIG_DEBUG_BLK_CGROUP") introduced two batches of confusing ifdefs:
>> one reported in [1], plus a similar one in another function. This
>> commit removes both batches, in the way suggested in [1].
>=20
> Some comments below.
>=20
>> +static inline void bfq_update_dispatch_stats(struct request *rq,
>> +					     spinlock_t *queue_lock,
>> +					     struct bfq_queue =
*in_serv_queue,
>> +					     bool idle_timer_disabled)
>> +{
>=20
> Don't pass in the queue lock. The normal convention is to pass in the
> queue, thus making this:
>=20
> static void bfq_update_dispatch_stats(struct request_queue *q,
> 				      struct request *rq,
> 				      struct bfq_queue *in_serv_queue,
> 				      bool idle_timer_disabled)
>=20

Ok, thanks.  One question, just to try to learn, if you have time and
patience for a brief explanation.  Did this convention originate from
some rationale?  My concern is that bfq_update_dispatch_stats works on
no field of q but the lock, and this fact would have been made
explicit by passing only that exact field.

> which also gets rid of the inline. In general, never inline anything.
> The compiler should figure it out for you. This function is way too big
> to inline, plus the cost of what it's doing completely dwarfes function
> call overhead.
>=20

Actually, I did so because of Linus' suggestion in [1]: "So for
example, the functions that can go away should obviously be inline
functions so that you don't end up having the compiler generate the
arguments and the call to an empty function body ..."

Maybe I misinterpreted his suggestion, and he meant that the function
should be designed in such a way as to be (almost) certainly considered
inline by the compiler?

Thanks,
Paolo

[1] https://www.spinics.net/lists/linux-block/msg20043.html

>=20
>>=20
>> +	struct bfq_queue *bfqq =3D rq ? RQ_BFQQ(rq) : NULL;
>>=20
>> -#if defined(CONFIG_BFQ_GROUP_IOSCHED) && =
defined(CONFIG_DEBUG_BLK_CGROUP)
>> -	bfqq =3D rq ? RQ_BFQQ(rq) : NULL;
>> 	if (!idle_timer_disabled && !bfqq)
>> -		return rq;
>> +		return;
>>=20
>> 	/*
>> 	 * rq and bfqq are guaranteed to exist until this function
>> @@ -3732,7 +3713,7 @@ static struct request =
*bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
>> 	 * In addition, the following queue lock guarantees that
>> 	 * bfqq_group(bfqq) exists as well.
>> 	 */
>> -	spin_lock_irq(hctx->queue->queue_lock);
>> +	spin_lock_irq(queue_lock);
>> 	if (idle_timer_disabled)
>> 		/*
>> 		 * Since the idle timer has been disabled,
>> @@ -3751,9 +3732,37 @@ static struct request =
*bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
>> 		bfqg_stats_set_start_empty_time(bfqg);
>> 		bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
>> 	}
>> -	spin_unlock_irq(hctx->queue->queue_lock);
>> +	spin_unlock_irq(queue_lock);
>> +}
>> +#else
>> +static inline void bfq_update_dispatch_stats(struct request *rq,
>> +					     spinlock_t *queue_lock,
>> +					     struct bfq_queue =
*in_serv_queue,
>> +					     bool idle_timer_disabled) =
{}
>> #endif
>>=20
>> +static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx =
*hctx)
>> +{
>> +	struct bfq_data *bfqd =3D hctx->queue->elevator->elevator_data;
>> +	struct request *rq;
>> +	struct bfq_queue *in_serv_queue;
>> +	bool waiting_rq, idle_timer_disabled;
>> +
>> +	spin_lock_irq(&bfqd->lock);
>> +
>> +	in_serv_queue =3D bfqd->in_service_queue;
>> +	waiting_rq =3D in_serv_queue && =
bfq_bfqq_wait_request(in_serv_queue);
>> +
>> +	rq =3D __bfq_dispatch_request(hctx);
>> +
>> +	idle_timer_disabled =3D
>> +		waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
>> +
>> +	spin_unlock_irq(&bfqd->lock);
>> +
>> +	bfq_update_dispatch_stats(rq, hctx->queue->queue_lock, =
in_serv_queue,
>> +				  idle_timer_disabled);
>> +
>> 	return rq;
>> }
>>=20
>> @@ -4276,16 +4285,46 @@ static bool __bfq_insert_request(struct =
bfq_data *bfqd, struct request *rq)
>> 	return idle_timer_disabled;
>> }
>>=20
>> +#if defined(CONFIG_BFQ_GROUP_IOSCHED) && =
defined(CONFIG_DEBUG_BLK_CGROUP)
>> +static inline void bfq_update_insert_stats(struct bfq_queue *bfqq,
>> +					   spinlock_t *queue_lock,
>> +					   bool idle_timer_disabled,
>> +					   unsigned int cmd_flags)
>> +{
>> +	if (!bfqq)
>> +		return;
>> +
>> +	/*
>> +	 * bfqq still exists, because it can disappear only after
>> +	 * either it is merged with another queue, or the process it
>> +	 * is associated with exits. But both actions must be taken by
>> +	 * the same process currently executing this flow of
>> +	 * instructions.
>> +	 *
>> +	 * In addition, the following queue lock guarantees that
>> +	 * bfqq_group(bfqq) exists as well.
>> +	 */
>> +	spin_lock_irq(queue_lock);
>> +	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
>> +	if (idle_timer_disabled)
>> +		bfqg_stats_update_idle_time(bfqq_group(bfqq));
>> +	spin_unlock_irq(queue_lock);
>> +}
>> +#else
>> +static inline void bfq_update_insert_stats(struct bfq_queue *bfqq,
>> +					   spinlock_t *queue_lock,
>> +					   bool idle_timer_disabled,
>> +					   unsigned int cmd_flags) {}
>> +#endif
>=20
> Ditto here, kill the inlines.
>=20
> In general though, good improvement.
>=20
>=20
> --=20
> Jens Axboe
>=20

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] block, bfq: remove batches of confusing ifdefs
  2017-12-02 10:04   ` Paolo Valente
@ 2017-12-02 16:06     ` Jens Axboe
  2017-12-02 17:22       ` Paolo Valente
  0 siblings, 1 reply; 6+ messages in thread
From: Jens Axboe @ 2017-12-02 16:06 UTC (permalink / raw)
  To: Paolo Valente
  Cc: linux-block, Linux Kernel Mailing List, Ulf Hansson, broonie,
	linus.walleij, lucmiccio, bfq-iosched

On 12/02/2017 03:04 AM, Paolo Valente wrote:
> 
>> Il giorno 30 nov 2017, alle ore 22:21, Jens Axboe <axboe@kernel.dk> ha scritto:
>>
>> On 11/28/2017 02:37 AM, Paolo Valente wrote:
>>> Commit a33801e8b473 ("block, bfq: move debug blkio stats behind
>>> CONFIG_DEBUG_BLK_CGROUP") introduced two batches of confusing ifdefs:
>>> one reported in [1], plus a similar one in another function. This
>>> commit removes both batches, in the way suggested in [1].
>>
>> Some comments below.
>>
>>> +static inline void bfq_update_dispatch_stats(struct request *rq,
>>> +					     spinlock_t *queue_lock,
>>> +					     struct bfq_queue *in_serv_queue,
>>> +					     bool idle_timer_disabled)
>>> +{
>>
>> Don't pass in the queue lock. The normal convention is to pass in the
>> queue, thus making this:
>>
>> static void bfq_update_dispatch_stats(struct request_queue *q,
>> 				      struct request *rq,
>> 				      struct bfq_queue *in_serv_queue,
>> 				      bool idle_timer_disabled)
>>
> 
> Ok, thanks.  One question, just to try to learn, if you have time and
> patience for a brief explanation.  Was this convention originated by
> some rationale?  My concern is that bfq_update_dispatch_stats works on
> no field of q but the lock, and this fact would have been made
> explicit by passing only that exact field.

When you just pass in a lock, nobody knows what that lock is without
looking at the caller. If you pass in the queue, it's apparent
what is being locked.

>> which also gets rid of the inline. In general, never inline anything.
>> The compiler should figure it out for you. This function is way too big
>> to inline, plus the cost of what it's doing completely dwarfes function
>> call overhead.
>>
> 
> Actually, I did so because of Linus' suggestion in [1]: "So for
> example, the functions that can go away should obviously be inline
> functions so that you don't end up having the compiler generate the
> arguments and the call to an empty function body ..."
> 
> Maybe I misinterpreted his suggestion, and he meant that the function
> should be designed in such a way to be (almost) certainly considered
> inline by the compiler?

You can do that for the empty version, don't do it for the non-empty
version. That will go away, the other one will not.

-- 
Jens Axboe

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] block, bfq: remove batches of confusing ifdefs
  2017-12-02 16:06     ` Jens Axboe
@ 2017-12-02 17:22       ` Paolo Valente
  0 siblings, 0 replies; 6+ messages in thread
From: Paolo Valente @ 2017-12-02 17:22 UTC (permalink / raw)
  To: Jens Axboe
  Cc: linux-block, Linux Kernel Mailing List, Ulf Hansson, broonie,
	linus.walleij, lucmiccio, bfq-iosched


> Il giorno 02 dic 2017, alle ore 17:06, Jens Axboe <axboe@kernel.dk> ha scritto:
>=20
> On 12/02/2017 03:04 AM, Paolo Valente wrote:
>>=20
>>> Il giorno 30 nov 2017, alle ore 22:21, Jens Axboe <axboe@kernel.dk> ha scritto:
>>>=20
>>> On 11/28/2017 02:37 AM, Paolo Valente wrote:
>>>> Commit a33801e8b473 ("block, bfq: move debug blkio stats behind
>>>> CONFIG_DEBUG_BLK_CGROUP") introduced two batches of confusing ifdefs:
>>>> one reported in [1], plus a similar one in another function. This
>>>> commit removes both batches, in the way suggested in [1].
>>>=20
>>> Some comments below.
>>>=20
>>>> +static inline void bfq_update_dispatch_stats(struct request *rq,
>>>> +					     spinlock_t *queue_lock,
>>>> +					     struct bfq_queue =
*in_serv_queue,
>>>> +					     bool idle_timer_disabled)
>>>> +{
>>>=20
>>> Don't pass in the queue lock. The normal convention is to pass in the
>>> queue, thus making this:
>>>=20
>>> static void bfq_update_dispatch_stats(struct request_queue *q,
>>> 				      struct request *rq,
>>> 				      struct bfq_queue *in_serv_queue,
>>> 				      bool idle_timer_disabled)
>>>=20
>>=20
>> Ok, thanks.  One question, just to try to learn, if you have time and
>> patience for a brief explanation.  Was this convention originated by
>> some rationale?  My concern is that bfq_update_dispatch_stats works on
>> no field of q but the lock, and this fact would have been made
>> explicit by passing only that exact field.
>=20
> When you just pass in a lock, nobody knows what that lock is without
> looking at the caller. If you pass in the queue, it's apparent
> what is being locked.
>=20

Got it, thanks a lot.

>>> which also gets rid of the inline. In general, never inline anything.
>>> The compiler should figure it out for you. This function is way too big
>>> to inline, plus the cost of what it's doing completely dwarfes function
>>> call overhead.
>>>=20
>>=20
>> Actually, I did so because of Linus' suggestion in [1]: "So for
>> example, the functions that can go away should obviously be inline
>> functions so that you don't end up having the compiler generate the
>> arguments and the call to an empty function body ..."
>>=20
>> Maybe I misinterpreted his suggestion, and he meant that the function
>> should be designed in such a way to be (almost) certainly considered
>> inline by the compiler?
>=20
> You can do that for the empty version, don't do it for the non-empty
> version. That will go away, the other one will not.
>=20

Of course, thanks, and sorry for the silly question.

I'll make and submit a new patch according to your comments.

Paolo

> --=20
> Jens Axboe

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2017-12-02 17:22 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-11-28  9:37 [PATCH] block, bfq: remove batches of confusing ifdefs Paolo Valente
2017-11-28 10:39 ` Ulf Hansson
2017-11-30 21:21 ` Jens Axboe
2017-12-02 10:04   ` Paolo Valente
2017-12-02 16:06     ` Jens Axboe
2017-12-02 17:22       ` Paolo Valente

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).