* [PATCH] blk-wbt: fix performance regression in wbt scale_up/scale_down
From: Harshad Shirwadkar @ 2019-10-04 18:09 UTC (permalink / raw)
  To: linux-kernel; +Cc: Harshad Shirwadkar, Josef Bacik

scale_up wakes up waiters after scaling up. But once the maximum depth
has been reached, it should not wake up more waiters, since they have
nothing left to do. Fix this by making scale_up (and, symmetrically,
scale_down) return whether scaling actually took place, so that the
callers can bail out early when it did not.

This bug causes increased fdatasync latency on 4.19 compared to 4.14
when fdatasync and dd conv=sync are run in parallel. It was introduced
during the refactoring of the blk-wbt code.
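
To illustrate, here is roughly the pre-patch scale_up() from
block/blk-wbt.c, reconstructed from the hunks below (a simplified
sketch; the trailing trace call is elided). rwb_wake_all() ran even
when rq_depth_scale_up() had bailed out because the maximum depth was
already reached:

static void scale_up(struct rq_wb *rwb)
{
	rq_depth_scale_up(&rwb->rq_depth);	/* no-op once scaled_max is set */
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_wake_all(rwb);	/* woken waiters find no additional depth */
}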

Fixes: a79050434b45 ("blk-rq-qos: refactor out common elements of blk-wbt")
Cc: Josef Bacik <jbacik@fb.com>
Signed-off-by: Harshad Shirwadkar <harshadshirwadkar@gmail.com>
---
 block/blk-rq-qos.c | 14 +++++++++-----
 block/blk-rq-qos.h |  4 ++--
 block/blk-wbt.c    |  6 ++++--
 3 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 3954c0dc1443..d92abb43000c 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -142,24 +142,27 @@ bool rq_depth_calc_max_depth(struct rq_depth *rqd)
 	return ret;
 }
 
-void rq_depth_scale_up(struct rq_depth *rqd)
+/* Returns true on success and false if scaling up wasn't possible */
+bool rq_depth_scale_up(struct rq_depth *rqd)
 {
 	/*
 	 * Hit max in previous round, stop here
 	 */
 	if (rqd->scaled_max)
-		return;
+		return false;
 
 	rqd->scale_step--;
 
 	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
+	return true;
 }
 
 /*
  * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
- * had a latency violation.
+ * had a latency violation. Returns true on success and false if
+ * scaling down wasn't possible.
  */
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 {
 	/*
 	 * Stop scaling down when we've hit the limit. This also prevents
@@ -167,7 +170,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 	 * keep up.
 	 */
 	if (rqd->max_depth == 1)
-		return;
+		return false;
 
 	if (rqd->scale_step < 0 && hard_throttle)
 		rqd->scale_step = 0;
@@ -176,6 +179,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 
 	rqd->scaled_max = false;
 	rq_depth_calc_max_depth(rqd);
+	return 0;
 }
 
 struct rq_qos_wait_data {
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 2300e038b9fa..c0f0778d5396 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -125,8 +125,8 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
 		 acquire_inflight_cb_t *acquire_inflight_cb,
 		 cleanup_cb_t *cleanup_cb);
 bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
-void rq_depth_scale_up(struct rq_depth *rqd);
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
+bool rq_depth_scale_up(struct rq_depth *rqd);
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
 bool rq_depth_calc_max_depth(struct rq_depth *rqd);
 
 void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 313f45a37e9d..5a96881e7a52 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -308,7 +308,8 @@ static void calc_wb_limits(struct rq_wb *rwb)
 
 static void scale_up(struct rq_wb *rwb)
 {
-	rq_depth_scale_up(&rwb->rq_depth);
+	if (!rq_depth_scale_up(&rwb->rq_depth))
+		return;
 	calc_wb_limits(rwb);
 	rwb->unknown_cnt = 0;
 	rwb_wake_all(rwb);
@@ -317,7 +318,8 @@ static void scale_up(struct rq_wb *rwb)
 
 static void scale_down(struct rq_wb *rwb, bool hard_throttle)
 {
-	rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
+	if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
+		return;
 	calc_wb_limits(rwb);
 	rwb->unknown_cnt = 0;
 	rwb_trace_step(rwb, "scale down");
-- 
2.23.0.581.g78d2f28ef7-goog



* Re: [PATCH] blk-wbt: fix performance regression in wbt scale_up/scale_down
From: harshad shirwadkar @ 2019-10-04 20:53 UTC (permalink / raw)
  To: linux-kernel; +Cc: Josef Bacik, vaibhavrustagi

On Fri, Oct 4, 2019 at 11:09 AM Harshad Shirwadkar
<harshadshirwadkar@gmail.com> wrote:
>
> [...]
> @@ -176,6 +179,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
>
>         rqd->scaled_max = false;
>         rq_depth_calc_max_depth(rqd);
> +       return 0;
Oops, I meant "return true" here; thanks, Vaibhav (+cc), for pointing
this out. The 0 is implicitly converted to bool false, so a successful
scale-down would be misreported as a failure, and scale_down() would
skip recalculating the limits. I'll fix this in V2.

>  }
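
For reference, a sketch of the corrected tail of rq_depth_scale_down()
as intended for V2 (reconstructed from the quoted hunk):

	rqd->scaled_max = false;
	rq_depth_calc_max_depth(rqd);
	return true;
}
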
> [...]
