Message-ID: <55036AA1.7000801@nvidia.com>
Date: Fri, 13 Mar 2015 15:54:25 -0700
From: Sai Gurrappadi
To: Morten Rasmussen, peterz@infradead.org, mingo@redhat.com
CC: vincent.guittot@linaro.org, Dietmar Eggemann, yuyang.du@intel.com,
 preeti@linux.vnet.ibm.com, mturquette@linaro.org, nico@linaro.org,
 rjw@rjwysocki.net, Juri Lelli, linux-kernel@vger.kernel.org,
 Peter Boonstoppel
Subject: Re: [RFCv3 PATCH 30/48] sched: Calculate energy consumption of sched_group
References: <1423074685-6336-1-git-send-email-morten.rasmussen@arm.com>
 <1423074685-6336-31-git-send-email-morten.rasmussen@arm.com>
In-Reply-To: <1423074685-6336-31-git-send-email-morten.rasmussen@arm.com>

On 02/04/2015 10:31 AM, Morten Rasmussen wrote:
> For energy-aware load-balancing decisions it is necessary to know the
> energy consumption estimates of groups of cpus. This patch introduces a
> basic function, sched_group_energy(), which estimates the energy
> consumption of the cpus in the group and any resources shared by the
> members of the group.
>
> NOTE: The function has five levels of indentation and breaks the 80
> character limit. Refactoring is necessary.
>
> cc: Ingo Molnar
> cc: Peter Zijlstra
>
> Signed-off-by: Morten Rasmussen
> ---
>  kernel/sched/fair.c | 143 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 143 insertions(+)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 872ae0e..d12aa63 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -4609,6 +4609,149 @@ static inline bool energy_aware(void)
>  	return sched_feat(ENERGY_AWARE);
>  }
>
> +/*
> + * cpu_norm_usage() returns the cpu usage relative to its current capacity,
> + * i.e. its busy ratio, in the range [0..SCHED_LOAD_SCALE], which is useful
> + * for energy calculations. Using the scale-invariant usage returned by
> + * get_cpu_usage() and approximating scale-invariant usage by:
> + *
> + *   usage ~ (curr_freq/max_freq)*1024 * capacity_orig/1024 * running_time/time
> + *
> + * the normalized usage can be found using capacity_curr:
> + *
> + *   capacity_curr = capacity_orig * curr_freq/max_freq
> + *
> + *   norm_usage = running_time/time ~ usage/capacity_curr
> + */
> +static inline unsigned long cpu_norm_usage(int cpu)
> +{
> +	unsigned long capacity_curr = capacity_curr_of(cpu);
> +
> +	return (get_cpu_usage(cpu) << SCHED_CAPACITY_SHIFT)/capacity_curr;
> +}
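To check my reading of the normalization above, a worked example with
made-up numbers: a cpu with capacity_orig = 1024 running at half its max
frequency and busy 25% of wall-clock time gives

	capacity_curr = 1024 * 1/2 = 512
	usage        ~= 0.25 * 512 = 128
	norm_usage    = (128 << SCHED_CAPACITY_SHIFT) / 512 = 256

i.e. 25% of 1024, which matches the busy ratio as intended.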
> +
> +static unsigned group_max_usage(struct sched_group *sg)
> +{
> +	int i;
> +	int max_usage = 0;
> +
> +	for_each_cpu(i, sched_group_cpus(sg))
> +		max_usage = max(max_usage, get_cpu_usage(i));
> +
> +	return max_usage;
> +}
> +
> +/*
> + * group_norm_usage() returns the approximated group usage relative to its
> + * current capacity (busy ratio) in the range [0..SCHED_LOAD_SCALE] for use
> + * in energy calculations. Since task executions may or may not overlap in
> + * time in the group, the true normalized usage lies between
> + * max(cpu_norm_usage(i)) and sum(cpu_norm_usage(i)) when iterating over
> + * all cpus i in the group. The latter is used as the estimate as it leads
> + * to a more pessimistic energy estimate (more busy).
> + */
> +static unsigned group_norm_usage(struct sched_group *sg)
> +{
> +	int i;
> +	unsigned long usage_sum = 0;
> +
> +	for_each_cpu(i, sched_group_cpus(sg))
> +		usage_sum += cpu_norm_usage(i);
> +
> +	if (usage_sum > SCHED_CAPACITY_SCALE)
> +		return SCHED_CAPACITY_SCALE;
> +	return usage_sum;
> +}
> +
> +static int find_new_capacity(struct sched_group *sg,
> +			struct sched_group_energy *sge)
> +{
> +	int idx;
> +	unsigned long util = group_max_usage(sg);
> +
> +	for (idx = 0; idx < sge->nr_cap_states; idx++) {
> +		if (sge->cap_states[idx].cap >= util)
> +			return idx;
> +	}
> +
> +	return idx;
> +}
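If I read find_new_capacity() correctly, it picks the lowest-indexed (and
presumably lowest-power) capacity state that still fits the usage of the
busiest cpu in the group. With made-up numbers:

	cap_states[].cap  = { 256, 512, 768, 1024 }
	group_max_usage() = 600
	-> idx = 2, i.e. the 768-capacity state

One thing I noticed while working through it: if util ever exceeds the
largest cap, the loop falls through with idx == nr_cap_states, which would
index one past the end of cap_states[] at the call site below. That
probably can't happen if usage is capped at capacity_orig, but it might
deserve a bounds check.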
> +
> +/*
> + * sched_group_energy(): Returns the absolute energy consumption of the
> + * cpus belonging to the sched_group, including resources shared only by
> + * members of the group. It iterates over all cpus in the hierarchy below
> + * the sched_group, starting from the bottom and working its way up before
> + * going to the next cpu, until all cpus are covered at all levels. The
> + * current implementation is likely to gather the same usage statistics
> + * multiple times. This can probably be done in a faster but more complex
> + * way.
> + */
> +static unsigned int sched_group_energy(struct sched_group *sg_top)
> +{
> +	struct sched_domain *sd;
> +	int cpu, total_energy = 0;
> +	struct cpumask visit_cpus;
> +	struct sched_group *sg;
> +
> +	WARN_ON(!sg_top->sge);
> +
> +	cpumask_copy(&visit_cpus, sched_group_cpus(sg_top));
> +
> +	while (!cpumask_empty(&visit_cpus)) {
> +		struct sched_group *sg_shared_cap = NULL;
> +
> +		cpu = cpumask_first(&visit_cpus);
> +
> +		/*
> +		 * Is the group utilization affected by cpus outside this
> +		 * sched_group?
> +		 */
> +		sd = highest_flag_domain(cpu, SD_SHARE_CAP_STATES);
> +		if (sd && sd->parent)
> +			sg_shared_cap = sd->parent->groups;

The above bit looks like it avoids supporting SD_SHARE_CAP_STATES for the
top-level sd (!sd->parent). Is that because there is no group that spans
all the CPUs spanned by that sd? It seems like sg_cap_util is just being
used as a proxy for the cpumask of CPUs to check for max_usage.

> +
> +		for_each_domain(cpu, sd) {
> +			sg = sd->groups;
> +
> +			/* Has this sched_domain already been visited? */
> +			if (sd->child && cpumask_first(sched_group_cpus(sg)) != cpu)
> +				break;
> +
> +			do {
> +				struct sched_group *sg_cap_util;
> +				unsigned group_util;
> +				int sg_busy_energy, sg_idle_energy;
> +				int cap_idx;
> +
> +				if (sg_shared_cap && sg_shared_cap->group_weight >= sg->group_weight)
> +					sg_cap_util = sg_shared_cap;
> +				else
> +					sg_cap_util = sg;
> +
> +				cap_idx = find_new_capacity(sg_cap_util, sg->sge);
> +				group_util = group_norm_usage(sg);
> +				sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power)
> +								>> SCHED_CAPACITY_SHIFT;
> +				sg_idle_energy = ((SCHED_LOAD_SCALE-group_util) * sg->sge->idle_states[0].power)
> +								>> SCHED_CAPACITY_SHIFT;
> +
> +				total_energy += sg_busy_energy + sg_idle_energy;
> +
> +				if (!sd->child)
> +					cpumask_xor(&visit_cpus, &visit_cpus, sched_group_cpus(sg));
> +
> +				if (cpumask_equal(sched_group_cpus(sg), sched_group_cpus(sg_top)))
> +					goto next_cpu;
> +
> +			} while (sg = sg->next, sg != sd->groups);
> +		}
> +next_cpu:
> +		continue;
> +	}
> +
> +	return total_energy;
> +}
> +
>  static int wake_wide(struct task_struct *p)
>  {
>  	int factor = this_cpu_read(sd_llc_size);

-Sai