From: Morten Rasmussen <morten.rasmussen@arm.com>
To: peterz@infradead.org, mingo@redhat.com
Cc: vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
	yuyang.du@intel.com, preeti@linux.vnet.ibm.com,
	mturquette@linaro.org, nico@linaro.org, rjw@rjwysocki.net,
	juri.lelli@arm.com, linux-kernel@vger.kernel.org
Subject: [RFCv3 PATCH 23/48] sched: Allocate and initialize energy data structures
Date: Wed,  4 Feb 2015 18:31:00 +0000	[thread overview]
Message-ID: <1423074685-6336-24-git-send-email-morten.rasmussen@arm.com> (raw)
In-Reply-To: <1423074685-6336-1-git-send-email-morten.rasmussen@arm.com>

From: Dietmar Eggemann <dietmar.eggemann@arm.com>

The per sched group sched_group_energy structure and the related
idle_state and capacity_state arrays are allocated like the other sched
domain (sd) hierarchy data structures. This includes freeing the
sched_group_energy structures which end up unused.
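
To make that lifecycle concrete, here is a hedged, self-contained
userspace sketch of the claim/free pattern that sge now follows (the
function names in the comments are the kernel's; everything else is an
illustrative stand-in):

  #include <stdlib.h>

  #define NCPUS 4

  struct energy { int ref; };

  static struct energy *percpu_sge[NCPUS];  /* stands in for sdd->sge */

  int main(void)
  {
      int cpu;

      /* __sdt_alloc(): one candidate object per cpu. */
      for (cpu = 0; cpu < NCPUS; cpu++)
          percpu_sge[cpu] = calloc(1, sizeof(struct energy));

      /* get_group(): a group claims the candidate of its first cpu
       * by taking a reference (here: cpu 0 for one 4-cpu group). */
      struct energy *claimed = percpu_sge[0];
      claimed->ref = 1;

      /* claim_allocations(): claimed slots are cleared so teardown
       * cannot free objects that are still in use. */
      for (cpu = 0; cpu < NCPUS; cpu++)
          if (percpu_sge[cpu] && percpu_sge[cpu]->ref)
              percpu_sge[cpu] = NULL;

      /* __sdt_free(): only unclaimed candidates remain; free them. */
      for (cpu = 0; cpu < NCPUS; cpu++)
          free(percpu_sge[cpu]);

      /* free_sched_domain() analogue: drop the claimed object last. */
      free(claimed);
      return 0;
  }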

One complication is that the number of elements in the idle_state and
capacity_state arrays is not fixed, so it has to be retrieved in
__sdt_alloc() before the sched_group_energy structure and the two
arrays can be allocated in a single chunk of memory. The array pointers
(idle_states and cap_states) are then initialized to point to the
appropriate offsets inside that chunk, as sketched below.
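
For illustration, a minimal userspace sketch of that single-chunk
layout (illustrative types and a plain calloc() stand in for the
kernel's struct sched_group_energy and kzalloc_node(); the (e + 1)
form assumes cap_states is the structure's last member, which is what
the pointer arithmetic in the patch below encodes explicitly):

  #include <stdlib.h>

  struct idle_state { unsigned long power; };
  struct capacity_state { unsigned long cap, power; };

  struct energy {
      unsigned int nr_idle_states;
      struct idle_state *idle_states;
      unsigned int nr_cap_states;
      struct capacity_state *cap_states;
  };

  static struct energy *alloc_energy(unsigned int nr_idle,
                                     unsigned int nr_cap)
  {
      /* One chunk: the structure followed by both arrays. */
      struct energy *e = calloc(1, sizeof(*e) +
                                nr_idle * sizeof(struct idle_state) +
                                nr_cap * sizeof(struct capacity_state));
      if (!e)
          return NULL;

      /* Point the array members at the tail of the chunk. */
      e->idle_states = (struct idle_state *)(e + 1);
      e->cap_states = (struct capacity_state *)(e->idle_states + nr_idle);
      e->nr_idle_states = nr_idle;
      e->nr_cap_states = nr_cap;
      return e;  /* a single free(e) releases everything */
  }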

The new function init_sched_energy() initializes the sched_group_energy
structure and the two arrays if the sd topology level provides energy
information.
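
For context, the data that fn(cpu) is expected to return might look
like this on the architecture side (a hedged sketch with made-up
numbers; the real tables and the sched_domain_energy_f callback are
introduced elsewhere in this series, e.g. the TC2 tables in patch
25/48):

  static struct idle_state idle_states_cluster[] = {
      { .power = 25 },  /* e.g. WFI */
      { .power = 10 },  /* e.g. cluster power-down */
  };

  static struct capacity_state cap_states_cluster[] = {
      { .cap =  358, .power = 2967 },  /* e.g. lowest OPP */
      { .cap = 1024, .power = 5905 },  /* e.g. highest OPP */
  };

  static struct sched_group_energy energy_cluster = {
      .nr_idle_states = ARRAY_SIZE(idle_states_cluster),
      .idle_states    = idle_states_cluster,
      .nr_cap_states  = ARRAY_SIZE(cap_states_cluster),
      .cap_states     = cap_states_cluster,
  };

  static const struct sched_group_energy *cpu_cluster_energy(int cpu)
  {
      return &energy_cluster;
  }

init_sched_energy() then memcpy()s these tables into the per-group
chunk allocated above, after check_sched_energy_data() has verified
that all cpus in the group report consistent data.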

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
 kernel/sched/core.c  | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 kernel/sched/sched.h | 33 ++++++++++++++++++++++++
 2 files changed, 103 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a00a4c3..031ea48 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5707,6 +5707,7 @@ static void free_sched_domain(struct rcu_head *rcu)
 		free_sched_groups(sd->groups, 1);
 	} else if (atomic_dec_and_test(&sd->groups->ref)) {
 		kfree(sd->groups->sgc);
+		kfree(sd->groups->sge);
 		kfree(sd->groups);
 	}
 	kfree(sd);
@@ -5965,6 +5966,8 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 		*sg = *per_cpu_ptr(sdd->sg, cpu);
 		(*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
 		atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
+		(*sg)->sge = *per_cpu_ptr(sdd->sge, cpu);
+		atomic_set(&(*sg)->sge->ref, 1); /* for claim_allocations */
 	}
 
 	return cpu;
@@ -6054,6 +6057,28 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 	atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
 }
 
+static void init_sched_energy(int cpu, struct sched_domain *sd,
+			      struct sched_domain_topology_level *tl)
+{
+	struct sched_group *sg = sd->groups;
+	struct sched_group_energy *energy = sg->sge;
+	sched_domain_energy_f fn = tl->energy;
+	struct cpumask *mask = sched_group_cpus(sg);
+
+	if (!fn || !fn(cpu))
+		return;
+
+	if (cpumask_weight(mask) > 1)
+		check_sched_energy_data(cpu, fn, mask);
+
+	energy->nr_idle_states = fn(cpu)->nr_idle_states;
+	memcpy(energy->idle_states, fn(cpu)->idle_states,
+	       energy->nr_idle_states*sizeof(struct idle_state));
+	energy->nr_cap_states = fn(cpu)->nr_cap_states;
+	memcpy(energy->cap_states, fn(cpu)->cap_states,
+	       energy->nr_cap_states*sizeof(struct capacity_state));
+}
+
 /*
  * Initializers for schedule domains
  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
@@ -6144,6 +6169,9 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 
 	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
 		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
+
+	if (atomic_read(&(*per_cpu_ptr(sdd->sge, cpu))->ref))
+		*per_cpu_ptr(sdd->sge, cpu) = NULL;
 }
 
 #ifdef CONFIG_NUMA
@@ -6609,10 +6637,24 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 		if (!sdd->sgc)
 			return -ENOMEM;
 
+		sdd->sge = alloc_percpu(struct sched_group_energy *);
+		if (!sdd->sge)
+			return -ENOMEM;
+
 		for_each_cpu(j, cpu_map) {
 			struct sched_domain *sd;
 			struct sched_group *sg;
 			struct sched_group_capacity *sgc;
+			struct sched_group_energy *sge;
+			sched_domain_energy_f fn = tl->energy;
+			unsigned int nr_idle_states = 0;
+			unsigned int nr_cap_states = 0;
+
+			if (fn && fn(j)) {
+				nr_idle_states = fn(j)->nr_idle_states;
+				nr_cap_states = fn(j)->nr_cap_states;
+				BUG_ON(!nr_idle_states || !nr_cap_states);
+			}
 
 		       	sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
@@ -6636,6 +6678,26 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 				return -ENOMEM;
 
 			*per_cpu_ptr(sdd->sgc, j) = sgc;
+
+			sge = kzalloc_node(sizeof(struct sched_group_energy) +
+				nr_idle_states*sizeof(struct idle_state) +
+				nr_cap_states*sizeof(struct capacity_state),
+				GFP_KERNEL, cpu_to_node(j));
+
+			if (!sge)
+				return -ENOMEM;
+
+			sge->idle_states = (struct idle_state *)
+					   ((void *)&sge->cap_states +
+					    sizeof(sge->cap_states));
+
+			sge->cap_states = (struct capacity_state *)
+					  ((void *)&sge->cap_states +
+					   sizeof(sge->cap_states) +
+					   nr_idle_states*
+					   sizeof(struct idle_state));
+
+			*per_cpu_ptr(sdd->sge, j) = sge;
 		}
 	}
 
@@ -6664,6 +6726,8 @@ static void __sdt_free(const struct cpumask *cpu_map)
 				kfree(*per_cpu_ptr(sdd->sg, j));
 			if (sdd->sgc)
 				kfree(*per_cpu_ptr(sdd->sgc, j));
+			if (sdd->sge)
+				kfree(*per_cpu_ptr(sdd->sge, j));
 		}
 		free_percpu(sdd->sd);
 		sdd->sd = NULL;
@@ -6671,6 +6735,8 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		sdd->sg = NULL;
 		free_percpu(sdd->sgc);
 		sdd->sgc = NULL;
+		free_percpu(sdd->sge);
+		sdd->sge = NULL;
 	}
 }
 
@@ -6756,10 +6822,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 
 	/* Calculate CPU capacity for physical packages and nodes */
 	for (i = nr_cpumask_bits-1; i >= 0; i--) {
+		struct sched_domain_topology_level *tl = sched_domain_topology;
+
 		if (!cpumask_test_cpu(i, cpu_map))
 			continue;
 
-		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
+		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent, tl++) {
+			init_sched_energy(i, sd, tl);
 			claim_allocations(i, sd);
 			init_sched_groups_capacity(i, sd);
 		}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0e9dcc6..86cf6b2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -854,6 +854,39 @@ static inline unsigned int group_first_cpu(struct sched_group *group)
 
 extern int group_balance_cpu(struct sched_group *sg);
 
+/*
+ * Check that the per-cpu provided sd energy data is consistent for all cpus
+ * within the mask.
+ */
+static inline void check_sched_energy_data(int cpu, sched_domain_energy_f fn,
+					   const struct cpumask *cpumask)
+{
+	struct cpumask mask;
+	int i;
+
+	cpumask_xor(&mask, cpumask, get_cpu_mask(cpu));
+
+	for_each_cpu(i, &mask) {
+		int y;
+
+		BUG_ON(fn(i)->nr_idle_states != fn(cpu)->nr_idle_states);
+
+		for (y = 0; y < (fn(i)->nr_idle_states); y++) {
+			BUG_ON(fn(i)->idle_states[y].power !=
+					fn(cpu)->idle_states[y].power);
+		}
+
+		BUG_ON(fn(i)->nr_cap_states != fn(cpu)->nr_cap_states);
+
+		for (y = 0; y < (fn(i)->nr_cap_states); y++) {
+			BUG_ON(fn(i)->cap_states[y].cap !=
+					fn(cpu)->cap_states[y].cap);
+			BUG_ON(fn(i)->cap_states[y].power !=
+					fn(cpu)->cap_states[y].power);
+		}
+	}
+}
+
 #else
 
 static inline void sched_ttwu_pending(void) { }
-- 
1.9.1


