From: Vincent Guittot <vincent.guittot@linaro.org>
To: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@kernel.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Frederic Weisbecker <fweisbec@gmail.com>,
	Rik van Riel <riel@surriel.com>,
	Morten Rasmussen <morten.rasmussen@arm.com>,
	Quentin Perret <quentin.perret@arm.com>,
	Valentin Schneider <valentin.schneider@arm.com>,
	Patrick Bellasi <patrick.bellasi@arm.com>,
	linux-kernel <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load()
Date: Mon, 27 May 2019 15:31:27 +0200	[thread overview]
Message-ID: <CAKfTPtA4hs1+Xu_bG5o2RjOZy792_yLTm_GSv2u6t7_qyfjhTQ@mail.gmail.com> (raw)
In-Reply-To: <20190527062116.11512-8-dietmar.eggemann@arm.com>

On Mon, 27 May 2019 at 08:21, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:
>
> This is done to align the per cpu (i.e. per rq) load with the util
> counterpart (cpu_util(int cpu)). The term 'weighted' is not needed
> since there is no 'unweighted' load to distinguish it from.
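[ For context, the util counterpart referred to above looks roughly like this
  in this era's kernel/sched/fair.c (body simplified; the real helper also
  factors in util_est and clamps against the CPU's capacity):

static unsigned long cpu_util(int cpu)
{
	return READ_ONCE(cpu_rq(cpu)->cfs.avg.util_avg);
}

  so the rename below gives the load side the matching cpu_load(int cpu)
  signature. ]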
>
> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
> ---
>  kernel/sched/fair.c | 44 ++++++++++++++++++++------------------------
>  1 file changed, 20 insertions(+), 24 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index a33f196703a7..f6d0aad13090 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -1466,7 +1466,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
>                group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
>  }
>
> -static unsigned long weighted_cpuload(struct rq *rq);
> +static unsigned long cpu_load(int cpu);
>
>  /* Cached statistics for all CPUs within a node */
>  struct numa_stats {
> @@ -1485,9 +1485,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
>
>         memset(ns, 0, sizeof(*ns));
>         for_each_cpu(cpu, cpumask_of_node(nid)) {
> -               struct rq *rq = cpu_rq(cpu);
> -
> -               ns->load += weighted_cpuload(rq);
> +               ns->load += cpu_load(cpu);
>                 ns->compute_capacity += capacity_of(cpu);
>         }
>
> @@ -5334,9 +5332,9 @@ static struct {
>
>  #endif /* CONFIG_NO_HZ_COMMON */
>
> -static unsigned long weighted_cpuload(struct rq *rq)
> +static unsigned long cpu_load(int cpu)

It would be better to use cpu_runnable_load() instead of cpu_load(),
because the function returns runnable_load_avg and not load_avg.
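
For reference, a rough sketch of the two cfs_rq averages involved, as the
helpers look in this era's kernel/sched/fair.c (bodies simplified):

static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
{
	/* decaying load contribution of runnable *and* blocked tasks */
	return cfs_rq->avg.load_avg;
}

static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
{
	/* load contribution of the currently enqueued (runnable) tasks only */
	return cfs_rq->avg.runnable_load_avg;
}

A helper built on cfs_rq_runnable_load_avg() is therefore more precisely
named cpu_runnable_load() than cpu_load().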

>  {
> -       return cfs_rq_runnable_load_avg(&rq->cfs);
> +       return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
>  }
>
>  static unsigned long capacity_of(int cpu)
> @@ -5348,7 +5346,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
>  {
>         struct rq *rq = cpu_rq(cpu);
>         unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
> -       unsigned long load_avg = weighted_cpuload(rq);
> +       unsigned long load_avg = cpu_load(cpu);
>
>         if (nr_running)
>                 return load_avg / nr_running;
> @@ -5446,7 +5444,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
>         s64 this_eff_load, prev_eff_load;
>         unsigned long task_load;
>
> -       this_eff_load = weighted_cpuload(cpu_rq(this_cpu));
> +       this_eff_load = cpu_load(this_cpu);
>
>         if (sync) {
>                 unsigned long current_load = task_h_load(current);
> @@ -5464,7 +5462,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
>                 this_eff_load *= 100;
>         this_eff_load *= capacity_of(prev_cpu);
>
> -       prev_eff_load = weighted_cpuload(cpu_rq(this_cpu));
> +       prev_eff_load = cpu_load(this_cpu);
>         prev_eff_load -= task_load;
>         if (sched_feat(WA_BIAS))
>                 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
> @@ -5552,7 +5550,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
>                 max_spare_cap = 0;
>
>                 for_each_cpu(i, sched_group_span(group)) {
> -                       load = weighted_cpuload(cpu_rq(i));
> +                       load = cpu_load(i);
>                         runnable_load += load;
>
>                         avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
> @@ -5688,7 +5686,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
>                                 shallowest_idle_cpu = i;
>                         }
>                 } else if (shallowest_idle_cpu == -1) {
> -                       load = weighted_cpuload(cpu_rq(i));
> +                       load = cpu_load(i);
>                         if (load < min_load) {
>                                 min_load = load;
>                                 least_loaded_cpu = i;
> @@ -7259,8 +7257,8 @@ static struct task_struct *detach_one_task(struct lb_env *env)
>  static const unsigned int sched_nr_migrate_break = 32;
>
>  /*
> - * detach_tasks() -- tries to detach up to imbalance weighted load from
> - * busiest_rq, as part of a balancing operation within domain "sd".
> + * detach_tasks() -- tries to detach up to imbalance load from busiest_rq,
> + * as part of a balancing operation within domain "sd".
>   *
>   * Returns number of detached tasks if successful and 0 otherwise.
>   */
> @@ -7326,8 +7324,7 @@ static int detach_tasks(struct lb_env *env)
>  #endif
>
>                 /*
> -                * We only want to steal up to the prescribed amount of
> -                * weighted load.
> +                * We only want to steal up to the prescribed amount of load.
>                  */
>                 if (env->imbalance <= 0)
>                         break;
> @@ -7931,7 +7928,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
>                 if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
>                         env->flags |= LBF_NOHZ_AGAIN;
>
> -               sgs->group_load += weighted_cpuload(rq);
> +               sgs->group_load += cpu_load(i);
>                 sgs->group_util += cpu_util(i);
>                 sgs->sum_nr_running += rq->cfs.h_nr_running;
>
> @@ -8385,8 +8382,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
>   * find_busiest_group - Returns the busiest group within the sched_domain
>   * if there is an imbalance.
>   *
> - * Also calculates the amount of weighted load which should be moved
> - * to restore balance.
> + * Also calculates the amount of load which should be moved to restore balance.
>   *
>   * @env: The load balancing environment.
>   *
> @@ -8558,11 +8554,11 @@ static struct rq *find_busiest_queue(struct lb_env *env,
>                     rq->nr_running == 1)
>                         continue;
>
> -               wl = weighted_cpuload(rq);
> +               wl = cpu_load(i);
>
>                 /*
> -                * When comparing with imbalance, use weighted_cpuload()
> -                * which is not scaled with the CPU capacity.
> +                * When comparing with imbalance, use cpu_load() which is not
> +                * scaled with the CPU capacity.
>                  */
>
>                 if (rq->nr_running == 1 && wl > env->imbalance &&
> @@ -8571,9 +8567,9 @@ static struct rq *find_busiest_queue(struct lb_env *env,
>
>                 /*
>                  * For the load comparisons with the other CPU's, consider
> -                * the weighted_cpuload() scaled with the CPU capacity, so
> -                * that the load can be moved away from the CPU that is
> -                * potentially running at a lower capacity.
> +                * the cpu_load() scaled with the CPU capacity, so that the
> +                * load can be moved away from the CPU that is potentially
> +                * running at a lower capacity.
>                  *
>                  * Thus we're looking for max(wl_i / capacity_i), crosswise
>                  * multiplication to rid ourselves of the division works out
> --
> 2.17.1
>

Thread overview: 28+ messages
2019-05-27  6:21 [PATCH 0/7] sched: Remove per rq load array Dietmar Eggemann
2019-05-27  6:21 ` [PATCH 1/7] sched: Remove rq->cpu_load[] update code Dietmar Eggemann
2019-05-27 16:09   ` Rik van Riel
2019-06-03 13:02   ` [tip:sched/core] sched/fair: Remove the " tip-bot for Dietmar Eggemann
2019-05-27  6:21 ` [PATCH 2/7] sched/fair: Replace source_load() & target_load() w/ weighted_cpuload() Dietmar Eggemann
2019-05-27 16:09   ` Rik van Riel
2019-05-28 10:24   ` Dietmar Eggemann
2019-05-28 10:53     ` Peter Zijlstra
2019-06-03 13:02   ` [tip:sched/core] sched/fair: Replace source_load() & target_load() with weighted_cpuload() tip-bot for Dietmar Eggemann
2019-05-27  6:21 ` [PATCH 3/7] sched/debug: Remove sd->*_idx range on sysctl Dietmar Eggemann
2019-05-27 16:10   ` Rik van Riel
2019-06-03 13:03   ` [tip:sched/core] " tip-bot for Dietmar Eggemann
2019-05-27  6:21 ` [PATCH 4/7] sched: Remove rq->cpu_load[] Dietmar Eggemann
2019-05-27 16:10   ` Rik van Riel
2019-06-03 13:04   ` [tip:sched/core] sched/core: " tip-bot for Dietmar Eggemann
2019-05-27  6:21 ` [PATCH 5/7] sched: Remove sd->*_idx Dietmar Eggemann
2019-05-27 16:12   ` Rik van Riel
2019-06-03 13:04   ` [tip:sched/core] sched/core: " tip-bot for Dietmar Eggemann
2019-05-27  6:21 ` [PATCH 6/7] sched/fair: Remove sgs->sum_weighted_load Dietmar Eggemann
2019-05-27 14:07   ` Vincent Guittot
2019-05-27 16:13   ` Rik van Riel
2019-06-03 13:05   ` [tip:sched/core] " tip-bot for Dietmar Eggemann
2019-05-27  6:21 ` [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load() Dietmar Eggemann
2019-05-27 13:31   ` Vincent Guittot [this message]
2019-05-27 16:24   ` Rik van Riel
2019-05-27 19:13     ` Peter Zijlstra
2019-06-18 12:23       ` Dietmar Eggemann
2019-06-25  8:29         ` [tip:sched/core] sched/fair: Rename weighted_cpuload() to cpu_runnable_load() tip-bot for Dietmar Eggemann
