From: pang.xunlei@zte.com.cn
To: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Dietmar Eggemann <Dietmar.Eggemann@arm.com>,
	Juri Lelli <Juri.Lelli@arm.com>,
	linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org,
	mingo@redhat.com, morten.rasmussen@arm.com,
	mturquette@linaro.org, peterz@infradead.org,
	preeti@linux.vnet.ibm.com, rjw@rjwysocki.net,
	sgurrappadi@nvidia.com, vincent.guittot@linaro.org,
	yuyang.du@intel.com, pang.xunlei@linaro.org
Subject: Re: [RFCv4 PATCH 25/34] sched: Add over-utilization/tipping point indicator
Date: Tue, 30 Jun 2015 17:35:41 +0800	[thread overview]
Message-ID: <OF461FAF0B.652BAC50-ON48257E74.0030491E-48257E74.0034AB06@zte.com.cn> (raw)
In-Reply-To: <1431459549-18343-26-git-send-email-morten.rasmussen@arm.com>

Hi Morten,

Morten Rasmussen <morten.rasmussen@arm.com> wrote on 2015-05-13 at 03:39:00 AM:
> 
> [RFCv4 PATCH 25/34] sched: Add over-utilization/tipping point indicator
> 
> Energy-aware scheduling is only meant to be active while the system is
> _not_ over-utilized. That is, there are spare cycles available to shift
> tasks around based on their actual utilization to get a more
> energy-efficient task distribution without depriving any tasks. When
> above the tipping point, task placement is done the traditional way:
> spreading the tasks across as many cpus as possible based on
> priority-scaled load to preserve smp_nice.
> 
> The over-utilization condition is conservatively chosen to indicate
> over-utilization as soon as one cpu is fully utilized at its highest
> frequency. We don't consider groups, as lumping usage and capacity
> together for a group of cpus may hide the fact that one or more cpus in
> the group are over-utilized while group-siblings are partially idle. The
> tasks could be served better if moved to another group with completely
> idle cpus. This is particularly problematic if some cpus have a
> significantly reduced capacity due to RT/IRQ pressure or if the system
> has cpus of different capacity (e.g. ARM big.LITTLE).
> 
> cc: Ingo Molnar <mingo@redhat.com>
> cc: Peter Zijlstra <peterz@infradead.org>
> 
> Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
> ---
>  kernel/sched/fair.c  | 35 +++++++++++++++++++++++++++++++----
>  kernel/sched/sched.h |  3 +++
>  2 files changed, 34 insertions(+), 4 deletions(-)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index f36ab2f3..5b7bc28 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -4266,6 +4266,8 @@ static inline void hrtick_update(struct rq *rq)
>  }
>  #endif
> 
> +static bool cpu_overutilized(int cpu);
> +
>  /*
>   * The enqueue_task method is called before nr_running is
>   * increased. Here we update the fair scheduling stats and
> @@ -4276,6 +4278,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
>  {
>     struct cfs_rq *cfs_rq;
>     struct sched_entity *se = &p->se;
> +   int task_new = !(flags & ENQUEUE_WAKEUP);
> 
>     for_each_sched_entity(se) {
>        if (se->on_rq)
> @@ -4310,6 +4313,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
>     if (!se) {
>        update_rq_runnable_avg(rq, rq->nr_running);
>        add_nr_running(rq, 1);
> +      if (!task_new && !rq->rd->overutilized &&
> +          cpu_overutilized(rq->cpu))
> +         rq->rd->overutilized = true;
>     }
>     hrtick_update(rq);
>  }
> @@ -4937,6 +4943,14 @@ static int find_new_capacity(struct energy_env *eenv,
>     return idx;
>  }
> 
> +static unsigned int capacity_margin = 1280; /* ~20% margin */
> +
> +static bool cpu_overutilized(int cpu)
> +{
> +   return (capacity_of(cpu) * 1024) <
> +            (get_cpu_usage(cpu) * capacity_margin);
> +}
> +
>  /*
>   * sched_group_energy(): Returns absolute energy consumption of cpus belonging
>   * to the sched_group including shared resources shared only by members of the
> @@ -6732,11 +6746,12 @@ static enum group_type group_classify(struct lb_env *env,
>   * @local_group: Does group contain this_cpu.
>   * @sgs: variable to hold the statistics for this group.
>   * @overload: Indicate more than one runnable task for any CPU.
> + * @overutilized: Indicate overutilization for any CPU.
>   */
>  static inline void update_sg_lb_stats(struct lb_env *env,
>           struct sched_group *group, int load_idx,
>           int local_group, struct sg_lb_stats *sgs,
> -         bool *overload)
> +         bool *overload, bool *overutilized)
>  {
>     unsigned long load;
>     int i;
> @@ -6766,6 +6781,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
>        sgs->sum_weighted_load += weighted_cpuload(i);
>        if (idle_cpu(i))
>           sgs->idle_cpus++;
> +
> +      if (cpu_overutilized(i))
> +         *overutilized = true;
>     }
> 
>     /* Adjust by relative CPU capacity of the group */
> @@ -6871,7 +6889,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
>     struct sched_group *sg = env->sd->groups;
>     struct sg_lb_stats tmp_sgs;
>     int load_idx, prefer_sibling = 0;
> -   bool overload = false;
> +   bool overload = false, overutilized = false;
> 
>     if (child && child->flags & SD_PREFER_SIBLING)
>        prefer_sibling = 1;
> @@ -6893,7 +6911,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
>        }
> 
>        update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
> -                  &overload);
> +                  &overload, &overutilized);
> 
>        if (local_group)
>           goto next_group;
> @@ -6935,8 +6953,14 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
>        /* update overload indicator if we are at root domain */
>        if (env->dst_rq->rd->overload != overload)
>           env->dst_rq->rd->overload = overload;
> -   }
> 
> +      /* Update over-utilization (tipping point, U >= 0) indicator */
> +      if (env->dst_rq->rd->overutilized != overutilized)
> +         env->dst_rq->rd->overutilized = overutilized;
> +   } else {
> +      if (!env->dst_rq->rd->overutilized && overutilized)
> +         env->dst_rq->rd->overutilized = true;
> +   }
>  }
> 
>  /**
> @@ -8300,6 +8324,9 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
>        task_tick_numa(rq, curr);
> 
>     update_rq_runnable_avg(rq, 1);
> +
> +   if (!rq->rd->overutilized && cpu_overutilized(task_cpu(curr)))
> +      rq->rd->overutilized = true;
>  }
> 
>  /*
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index b627dfa..a5d2d69 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -535,6 +535,9 @@ struct root_domain {
>     /* Indicate more than one runnable task for any CPU */
>     bool overload;
> 
> +   /* Indicate one or more cpus over-utilized (tipping point) */
> +   bool overutilized;
> +
>     /*
>      * The bit corresponding to a CPU gets set here if such CPU has more
>      * than one runnable -deadline task (as it is below for RT tasks).
> -- 
> 1.9.1
> 

The tipping point idea is great for EAS, but I wonder about an
issue I found during my testing, described below:

I used rt-app to emulate the workload when testing the patchset.

First, I gradually added small rt-app workloads until the
utilization was around the tipping point, and EAS worked well,
placing the tasks on the small cores.
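
For reference, with the quoted cpu_overutilized() and capacity_margin
= 1280, a cpu counts as over-utilized once its usage exceeds
1024/1280, i.e. roughly 80% of its current capacity. A minimal
standalone sketch of that check (the capacity and usage numbers below
are made up for illustration, not taken from a real platform):

#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
static unsigned int capacity_margin = 1280;	/* ~20% margin, as in the patch */

/* Same inequality as the quoted cpu_overutilized(), but with capacity
 * and usage passed in directly instead of read from the runqueue. */
static bool cpu_overutilized(unsigned long capacity, unsigned long usage)
{
	return capacity * SCHED_CAPACITY_SCALE < usage * capacity_margin;
}

int main(void)
{
	/* Assume a little cpu with capacity 430: tipping point ~= 430 * 1024 / 1280 = 344 */
	printf("%d\n", cpu_overutilized(430, 300));	/* 0: below ~80%, EAS stays active */
	printf("%d\n", cpu_overutilized(430, 350));	/* 1: above ~80%, overutilized set  */
	return 0;
}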

Then I added some extra small load to push past the tipping point.
The CFS load balancer immediately took over from EAS (I could see
some tasks running on the big cores), but at that point the system
seemed to fluctuate badly back and forth between the big cores and
small cores, and the utilization reported by "top" looked a bit odd.

My guess is that once the tipping point is exceeded, CFS takes over
from EAS and migrates tasks from the small cores to the big cores.
As a result, the cpu utilization of the small cores drops back below
the tipping point, which activates EAS again and re-packs tasks onto
the small cores.

So the system just keeps fluctuating back and forth like that.
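
To make the suspected feedback loop concrete, below is a rough toy
model (plain userspace C; the capacities and task sizes are invented
for illustration and this is not the actual scheduler code): packing
on the little cpu trips the ~80% threshold, spreading to the big cpu
drops the little cpu back under it, and the root-domain overutilized
flag keeps toggling round after round.

#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
static const unsigned int capacity_margin = 1280;	/* ~20% margin */
static const unsigned long little_cap = 430, big_cap = 1024;	/* made-up capacities */

static bool overutilized(unsigned long cap, unsigned long usage)
{
	return cap * SCHED_CAPACITY_SCALE < usage * capacity_margin;
}

int main(void)
{
	const unsigned long task = 90, ntasks = 4;	/* four small tasks of ~90 units */
	bool rd_overutilized = false;

	for (int round = 0; round < 4; round++) {
		unsigned long little_usage, big_usage;

		if (!rd_overutilized) {
			/* EAS mode: pack everything on the little cpu. */
			little_usage = ntasks * task;		/* 360 > ~344 -> trips the flag  */
			big_usage = 0;
		} else {
			/* CFS mode: spread, half of the tasks move to the big cpu. */
			little_usage = (ntasks / 2) * task;	/* 180 < ~344 -> flag clears     */
			big_usage = (ntasks - ntasks / 2) * task;
		}

		rd_overutilized = overutilized(little_cap, little_usage) ||
				  overutilized(big_cap, big_usage);

		printf("round %d: little=%lu big=%lu overutilized=%d\n",
		       round, little_usage, big_usage, rd_overutilized);
	}
	return 0;
}

In this toy model the flag alternates 1, 0, 1, 0, which matches the
back-and-forth behaviour I saw; of course the real scheduler is far
more complicated, so this is only meant to illustrate the suspected
loop.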

-Xunlei
