From: Morten Rasmussen <morten.rasmussen@arm.com>
To: peterz@infradead.org, mingo@redhat.com
Cc: vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
	yuyang.du@intel.com, preeti@linux.vnet.ibm.com,
	mturquette@linaro.org, nico@linaro.org, rjw@rjwysocki.net,
	juri.lelli@arm.com, linux-kernel@vger.kernel.org,
	Paul Turner <pjt@google.com>, Ben Segall <bsegall@google.com>
Subject: [RFCv3 PATCH 01/48] sched: add utilization_avg_contrib
Date: Wed,  4 Feb 2015 18:30:38 +0000
Message-ID: <1423074685-6336-2-git-send-email-morten.rasmussen@arm.com>
In-Reply-To: <1423074685-6336-1-git-send-email-morten.rasmussen@arm.com>

From: Vincent Guittot <vincent.guittot@linaro.org>

Add new statistics reflecting the average time a task is running on
the CPU, and the sum of these running times over the tasks on a
runqueue. The latter is named utilization_load_avg.
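
For context, both sums are tracked with the per-entity load tracking
(PELT) geometric series: time is split into 1024us periods, and the
contribution of each older period is decayed by y, with y chosen such
that y^32 = 1/2 (hence the 1024/(1-y) bound mentioned in the sched_avg
comment below). A minimal user-space sketch, illustrative only and
using doubles in place of the kernel's fixed-point decay_load(), of how
running_avg_sum / avg_period yields a utilization in
[0..SCHED_LOAD_SCALE]:

	/* Standalone sketch, not kernel code: a task that runs 25% of
	 * every 1024us period converges to ~25% of SCHED_LOAD_SCALE.
	 * Build with: gcc sketch.c -lm
	 */
	#include <math.h>
	#include <stdio.h>

	#define SCHED_LOAD_SCALE 1024.0

	int main(void)
	{
		const double y = pow(0.5, 1.0 / 32.0);	/* y^32 == 1/2 */
		double running_avg_sum = 0.0;	/* time spent on the CPU */
		double avg_period = 0.0;	/* total tracked time */
		int i;

		for (i = 0; i < 345; i++) {	/* ~350ms, ample to converge */
			running_avg_sum = running_avg_sum * y + 0.25 * 1024.0;
			avg_period = avg_period * y + 1024.0;
		}

		/* Prints ~256, i.e. 25% of SCHED_LOAD_SCALE. */
		printf("utilization_avg_contrib ~= %.0f\n",
		       running_avg_sum / (avg_period + 1.0) * SCHED_LOAD_SCALE);
		return 0;
	}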

This patch is based on the usage metric that was proposed in the first
versions of the per-entity load tracking patchset by Paul Turner
<pjt@google.com> but was removed afterwards. This version differs from
the original one in that it is not linked to task_group.

The rq's utilization_load_avg will be used to check whether a rq is
overloaded, instead of trying to compute how many tasks a group of
CPUs can handle.
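
As a hedged illustration only: the helper below is hypothetical and not
part of this patch (the actual overload test is introduced later in the
series), but it shows the shape of a check built on this metric:

	/* Hypothetical sketch: treat a rq as overloaded once the summed
	 * running time of its entities approaches the capacity of the
	 * CPU. capacity_of()/cpu_of() as in kernel/sched/fair.c.
	 */
	static inline bool rq_overloaded(struct rq *rq)
	{
		return rq->cfs.utilization_load_avg >= capacity_of(cpu_of(rq));
	}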

Rename runnable_avg_period to avg_period, as it is now used with both
runnable_avg_sum and running_avg_sum.
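
With the rename, avg_period is the common denominator of both
per-entity contributions computed in fair.c below (modulo the
scale_load()/scale_load_down() fixed-point adjustments; see
__update_task_entity_contrib() and the new
__update_task_entity_utilization()):

	load_avg_contrib        = runnable_avg_sum * weight           / (avg_period + 1)
	utilization_avg_contrib = running_avg_sum  * SCHED_LOAD_SCALE / (avg_period + 1)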

Add descriptions of the variables to explain their differences.

cc: Paul Turner <pjt@google.com>
cc: Ben Segall <bsegall@google.com>

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Acked-by: Morten Rasmussen <morten.rasmussen@arm.com>
---
 include/linux/sched.h | 21 ++++++++++++---
 kernel/sched/debug.c  | 10 ++++---
 kernel/sched/fair.c   | 74 ++++++++++++++++++++++++++++++++++++++++-----------
 kernel/sched/sched.h  |  8 +++++-
 4 files changed, 89 insertions(+), 24 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8db31ef..e220a91 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1111,15 +1111,28 @@ struct load_weight {
 };
 
 struct sched_avg {
+	u64 last_runnable_update;
+	s64 decay_count;
+	/*
+	 * utilization_avg_contrib describes the amount of time that a
+	 * sched_entity is running on a CPU. It is based on running_avg_sum
+	 * and is scaled in the range [0..SCHED_LOAD_SCALE].
+	 * load_avg_contrib describes the amount of time that a sched_entity
+	 * is runnable on a rq. It is based on both runnable_avg_sum and the
+	 * weight of the task.
+	 */
+	unsigned long load_avg_contrib, utilization_avg_contrib;
 	/*
 	 * These sums represent an infinite geometric series and so are bound
 	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
 	 * choices of y < 1-2^(-32)*1024.
+	 * running_avg_sum reflects the time that the sched_entity is
+	 * effectively running on the CPU.
+	 * runnable_avg_sum represents the amount of time a sched_entity is on
+	 * a runqueue which includes the running time that is monitored by
+	 * running_avg_sum.
 	 */
-	u32 runnable_avg_sum, runnable_avg_period;
-	u64 last_runnable_update;
-	s64 decay_count;
-	unsigned long load_avg_contrib;
+	u32 runnable_avg_sum, avg_period, running_avg_sum;
 };
 
 #ifdef CONFIG_SCHEDSTATS
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 92cc520..3033aaa 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -71,7 +71,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 	if (!se) {
 		struct sched_avg *avg = &cpu_rq(cpu)->avg;
 		P(avg->runnable_avg_sum);
-		P(avg->runnable_avg_period);
+		P(avg->avg_period);
 		return;
 	}
 
@@ -94,7 +94,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 	P(se->load.weight);
 #ifdef CONFIG_SMP
 	P(se->avg.runnable_avg_sum);
-	P(se->avg.runnable_avg_period);
+	P(se->avg.avg_period);
 	P(se->avg.load_avg_contrib);
 	P(se->avg.decay_count);
 #endif
@@ -214,6 +214,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->runnable_load_avg);
 	SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
 			cfs_rq->blocked_load_avg);
+	SEQ_printf(m, "  .%-30s: %ld\n", "utilization_load_avg",
+			cfs_rq->utilization_load_avg);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
 			cfs_rq->tg_load_contrib);
@@ -635,8 +637,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	P(se.load.weight);
 #ifdef CONFIG_SMP
 	P(se.avg.runnable_avg_sum);
-	P(se.avg.runnable_avg_period);
+	P(se.avg.running_avg_sum);
+	P(se.avg.avg_period);
 	P(se.avg.load_avg_contrib);
+	P(se.avg.utilization_avg_contrib);
 	P(se.avg.decay_count);
 #endif
 	P(policy);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 40667cb..29adcbb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -670,6 +670,7 @@ static int select_idle_sibling(struct task_struct *p, int cpu);
 static unsigned long task_h_load(struct task_struct *p);
 
 static inline void __update_task_entity_contrib(struct sched_entity *se);
+static inline void __update_task_entity_utilization(struct sched_entity *se);
 
 /* Give new task start runnable values to heavy its load in infant time */
 void init_task_runnable_average(struct task_struct *p)
@@ -678,9 +679,10 @@ void init_task_runnable_average(struct task_struct *p)
 
 	p->se.avg.decay_count = 0;
 	slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
-	p->se.avg.runnable_avg_sum = slice;
-	p->se.avg.runnable_avg_period = slice;
+	p->se.avg.runnable_avg_sum = p->se.avg.running_avg_sum = slice;
+	p->se.avg.avg_period = slice;
 	__update_task_entity_contrib(&p->se);
+	__update_task_entity_utilization(&p->se);
 }
 #else
 void init_task_runnable_average(struct task_struct *p)
@@ -1674,7 +1676,7 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
 		*period = now - p->last_task_numa_placement;
 	} else {
 		delta = p->se.avg.runnable_avg_sum;
-		*period = p->se.avg.runnable_avg_period;
+		*period = p->se.avg.avg_period;
 	}
 
 	p->last_sum_exec_runtime = runtime;
@@ -2500,7 +2502,8 @@ static u32 __compute_runnable_contrib(u64 n)
  */
 static __always_inline int __update_entity_runnable_avg(u64 now,
 							struct sched_avg *sa,
-							int runnable)
+							int runnable,
+							int running)
 {
 	u64 delta, periods;
 	u32 runnable_contrib;
@@ -2526,7 +2529,7 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 	sa->last_runnable_update = now;
 
 	/* delta_w is the amount already accumulated against our next period */
-	delta_w = sa->runnable_avg_period % 1024;
+	delta_w = sa->avg_period % 1024;
 	if (delta + delta_w >= 1024) {
 		/* period roll-over */
 		decayed = 1;
@@ -2539,7 +2542,9 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 		delta_w = 1024 - delta_w;
 		if (runnable)
 			sa->runnable_avg_sum += delta_w;
-		sa->runnable_avg_period += delta_w;
+		if (running)
+			sa->running_avg_sum += delta_w;
+		sa->avg_period += delta_w;
 
 		delta -= delta_w;
 
@@ -2549,20 +2554,26 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 
 		sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
 						  periods + 1);
-		sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
+		sa->running_avg_sum = decay_load(sa->running_avg_sum,
+						  periods + 1);
+		sa->avg_period = decay_load(sa->avg_period,
 						     periods + 1);
 
 		/* Efficiently calculate \sum (1..n_period) 1024*y^i */
 		runnable_contrib = __compute_runnable_contrib(periods);
 		if (runnable)
 			sa->runnable_avg_sum += runnable_contrib;
-		sa->runnable_avg_period += runnable_contrib;
+		if (running)
+			sa->running_avg_sum += runnable_contrib;
+		sa->avg_period += runnable_contrib;
 	}
 
 	/* Remainder of delta accrued against u_0` */
 	if (runnable)
 		sa->runnable_avg_sum += delta;
-	sa->runnable_avg_period += delta;
+	if (running)
+		sa->running_avg_sum += delta;
+	sa->avg_period += delta;
 
 	return decayed;
 }
@@ -2578,6 +2589,8 @@ static inline u64 __synchronize_entity_decay(struct sched_entity *se)
 		return 0;
 
 	se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
+	se->avg.utilization_avg_contrib =
+		decay_load(se->avg.utilization_avg_contrib, decays);
 	se->avg.decay_count = 0;
 
 	return decays;
@@ -2614,7 +2627,7 @@ static inline void __update_tg_runnable_avg(struct sched_avg *sa,
 
 	/* The fraction of a cpu used by this cfs_rq */
 	contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
-			  sa->runnable_avg_period + 1);
+			  sa->avg_period + 1);
 	contrib -= cfs_rq->tg_runnable_contrib;
 
 	if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
@@ -2667,7 +2680,8 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
 {
-	__update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
+	__update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable,
+			runnable);
 	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
@@ -2685,7 +2699,7 @@ static inline void __update_task_entity_contrib(struct sched_entity *se)
 
 	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
 	contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
-	contrib /= (se->avg.runnable_avg_period + 1);
+	contrib /= (se->avg.avg_period + 1);
 	se->avg.load_avg_contrib = scale_load(contrib);
 }
 
@@ -2704,6 +2718,27 @@ static long __update_entity_load_avg_contrib(struct sched_entity *se)
 	return se->avg.load_avg_contrib - old_contrib;
 }
 
+
+static inline void __update_task_entity_utilization(struct sched_entity *se)
+{
+	u32 contrib;
+
+	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
+	contrib = se->avg.running_avg_sum * scale_load_down(SCHED_LOAD_SCALE);
+	contrib /= (se->avg.avg_period + 1);
+	se->avg.utilization_avg_contrib = scale_load(contrib);
+}
+
+static long __update_entity_utilization_avg_contrib(struct sched_entity *se)
+{
+	long old_contrib = se->avg.utilization_avg_contrib;
+
+	if (entity_is_task(se))
+		__update_task_entity_utilization(se);
+
+	return se->avg.utilization_avg_contrib - old_contrib;
+}
+
 static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
 						 long load_contrib)
 {
@@ -2720,7 +2755,7 @@ static inline void update_entity_load_avg(struct sched_entity *se,
 					  int update_cfs_rq)
 {
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	long contrib_delta;
+	long contrib_delta, utilization_delta;
 	u64 now;
 
 	/*
@@ -2732,18 +2767,22 @@ static inline void update_entity_load_avg(struct sched_entity *se,
 	else
 		now = cfs_rq_clock_task(group_cfs_rq(se));
 
-	if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
+	if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq,
+					cfs_rq->curr == se))
 		return;
 
 	contrib_delta = __update_entity_load_avg_contrib(se);
+	utilization_delta = __update_entity_utilization_avg_contrib(se);
 
 	if (!update_cfs_rq)
 		return;
 
-	if (se->on_rq)
+	if (se->on_rq) {
 		cfs_rq->runnable_load_avg += contrib_delta;
-	else
+		cfs_rq->utilization_load_avg += utilization_delta;
+	} else {
 		subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
+	}
 }
 
 /*
@@ -2818,6 +2857,7 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
 	}
 
 	cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
+	cfs_rq->utilization_load_avg += se->avg.utilization_avg_contrib;
 	/* we force update consideration on load-balancer moves */
 	update_cfs_rq_blocked_load(cfs_rq, !wakeup);
 }
@@ -2836,6 +2876,7 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
 	update_cfs_rq_blocked_load(cfs_rq, !sleep);
 
 	cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
+	cfs_rq->utilization_load_avg -= se->avg.utilization_avg_contrib;
 	if (sleep) {
 		cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
 		se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
@@ -3173,6 +3214,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		 */
 		update_stats_wait_end(cfs_rq, se);
 		__dequeue_entity(cfs_rq, se);
+		update_entity_load_avg(se, 1);
 	}
 
 	update_stats_curr_start(cfs_rq, se);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9a2a45c..17a3b6b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -362,8 +362,14 @@ struct cfs_rq {
 	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
 	 * This allows for the description of both thread and group usage (in
 	 * the FAIR_GROUP_SCHED case).
+	 * runnable_load_avg is the sum of the load_avg_contrib of the
+	 * sched_entities on the rq.
+	 * blocked_load_avg is similar to runnable_load_avg, except that it
+	 * sums the load_avg_contrib of the blocked sched_entities on the rq.
+	 * utilization_load_avg is the sum of the average running time of the
+	 * sched_entities on the rq.
 	 */
-	unsigned long runnable_load_avg, blocked_load_avg;
+	unsigned long runnable_load_avg, blocked_load_avg, utilization_load_avg;
 	atomic64_t decay_counter;
 	u64 last_decay;
 	atomic_long_t removed_load;
-- 
1.9.1


