[1/4] sched/numa: Rename nr_running and break out the magic number

Message ID 20201120090630.3286-2-mgorman@techsingularity.net
State Accepted
Commit abeae76a47005aa3f07c9be12d8076365622e25c
Series
  • Revisit NUMA imbalance tolerance and fork balancing

Commit Message

Mel Gorman Nov. 20, 2020, 9:06 a.m. UTC
This is simply a preparation patch to make the following patches easier
to read. No functional change.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
---
 kernel/sched/fair.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

Comments

Vincent Guittot Nov. 20, 2020, 1:32 p.m. UTC | #1
On Fri, 20 Nov 2020 at 10:06, Mel Gorman <mgorman@techsingularity.net> wrote:
>
> This is simply a preparation patch to make the following patches easier
> to read. No functional change.
>
> Signed-off-by: Mel Gorman <mgorman@techsingularity.net>

Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>

Patch

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6d78b68847f9..5fbed29e4001 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1550,7 +1550,7 @@ struct task_numa_env {
 static unsigned long cpu_load(struct rq *rq);
 static unsigned long cpu_runnable(struct rq *rq);
 static unsigned long cpu_util(int cpu);
-static inline long adjust_numa_imbalance(int imbalance, int nr_running);
+static inline long adjust_numa_imbalance(int imbalance, int dst_running);
 
 static inline enum
 numa_type numa_classify(unsigned int imbalance_pct,
@@ -8991,7 +8991,9 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 	}
 }
 
-static inline long adjust_numa_imbalance(int imbalance, int nr_running)
+#define NUMA_IMBALANCE_MIN 2
+
+static inline long adjust_numa_imbalance(int imbalance, int dst_running)
 {
 	unsigned int imbalance_min;
 
@@ -8999,8 +9001,8 @@ static inline long adjust_numa_imbalance(int imbalance, int nr_running)
 	 * Allow a small imbalance based on a simple pair of communicating
 	 * tasks that remain local when the source domain is almost idle.
 	 */
-	imbalance_min = 2;
-	if (nr_running <= imbalance_min)
+	imbalance_min = NUMA_IMBALANCE_MIN;
+	if (dst_running <= imbalance_min)
 		return 0;
 
 	return imbalance;
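
For readers outside the kernel tree, the renamed helper is self-contained
enough to exercise in userspace. The following is a minimal standalone
sketch, not kernel code: it copies the post-patch body of
adjust_numa_imbalance() and adds a hypothetical main() to show the effect
of NUMA_IMBALANCE_MIN (a computed imbalance is tolerated, i.e. reported
as 0, while the destination runs at most two tasks, and is passed through
unchanged otherwise).

#include <stdio.h>

#define NUMA_IMBALANCE_MIN 2

/*
 * Post-patch logic: allow a small imbalance when the destination
 * domain is almost idle, so a simple pair of communicating tasks
 * can remain local.
 */
static inline long adjust_numa_imbalance(int imbalance, int dst_running)
{
	unsigned int imbalance_min;

	imbalance_min = NUMA_IMBALANCE_MIN;
	if (dst_running <= imbalance_min)
		return 0;

	return imbalance;
}

int main(void)
{
	/* dst_running <= NUMA_IMBALANCE_MIN: imbalance is ignored */
	printf("%ld\n", adjust_numa_imbalance(3, 2));	/* prints 0 */
	/* dst_running > NUMA_IMBALANCE_MIN: imbalance passed through */
	printf("%ld\n", adjust_numa_imbalance(3, 3));	/* prints 3 */
	return 0;
}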