Date: Fri, 9 Dec 2016 16:22:42 +0100
From: Peter Zijlstra
To: Vincent Guittot
Cc: mingo@kernel.org, linux-kernel@vger.kernel.org, matt@codeblueprint.co.uk,
	Morten.Rasmussen@arm.com, dietmar.eggemann@arm.com, kernellwp@gmail.com,
	yuyang.du@intel.com, umgwanakikbuti@gmail.com
Subject: Re: [PATCH 2/2 v3] sched: use load_avg for selecting idlest group
Message-ID: <20161209152242.GU3124@twins.programming.kicks-ass.net>
References: <1481216215-24651-1-git-send-email-vincent.guittot@linaro.org>
 <1481216215-24651-3-git-send-email-vincent.guittot@linaro.org>
In-Reply-To: <1481216215-24651-3-git-send-email-vincent.guittot@linaro.org>

On Thu, Dec 08, 2016 at 05:56:54PM +0100, Vincent Guittot wrote:
> @@ -5449,14 +5456,32 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
> 		}
> 
> 		/* Adjust by relative CPU capacity of the group */
> -		avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
> +		avg_load = (avg_load * SCHED_CAPACITY_SCALE) /
> +					group->sgc->capacity;
> +		runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) /
> +					group->sgc->capacity;
> 
> 		if (local_group) {
> -			this_load = avg_load;
> +			this_runnable_load = runnable_load;
> +			this_avg_load = avg_load;
> 			this_spare = max_spare_cap;
> 		} else {
> -			if (avg_load < min_load) {
> -				min_load = avg_load;
> +			if (min_runnable_load > (runnable_load + imbalance)) {
> +				/*
> +				 * The runnable load is significantly smaller
> +				 *  so we can pick this new cpu
> +				 */
> +				min_runnable_load = runnable_load;
> +				min_avg_load = avg_load;
> +				idlest = group;
> +			} else if ((runnable_load < (min_runnable_load + imbalance)) &&
> +					(100*min_avg_load > imbalance_scale*avg_load)) {
> +				/*
> +				 * The runnable loads are close so we take
> +				 * into account blocked load through avg_load
> +				 *  which is blocked + runnable load
> +				 */
> +				min_avg_load = avg_load;
> 				idlest = group;
> 			}
> 
> @@ -5480,13 +5505,16 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
> 		goto skip_spare;
> 
> 	if (this_spare > task_util(p) / 2 &&
> -	    imbalance*this_spare > 100*most_spare)
> +	    imbalance_scale*this_spare > 100*most_spare)
> 		return NULL;
> 	else if (most_spare > task_util(p) / 2)
> 		return most_spare_sg;
> 
> skip_spare:
> -	if (!idlest || 100*this_load < imbalance*min_load)
> +	if (!idlest ||
> +	    (min_runnable_load > (this_runnable_load + imbalance)) ||
> +	    ((this_runnable_load < (min_runnable_load + imbalance)) &&
> +			(100*this_avg_load < imbalance_scale*min_avg_load)))
> 		return NULL;
> 	return idlest;
> }

I did the below on top for readability.
---
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5469,17 +5469,16 @@ find_idlest_group(struct sched_domain *s
 			if (min_runnable_load > (runnable_load + imbalance)) {
 				/*
 				 * The runnable load is significantly smaller
-				 *  so we can pick this new cpu
+				 * so we can pick this new cpu
 				 */
 				min_runnable_load = runnable_load;
 				min_avg_load = avg_load;
 				idlest = group;
 			} else if ((runnable_load < (min_runnable_load + imbalance)) &&
-					(100*min_avg_load > imbalance_scale*avg_load)) {
+				   (100*min_avg_load > imbalance_scale*avg_load)) {
 				/*
-				 * The runnable loads are close so we take
-				 * into account blocked load through avg_load
-				 *  which is blocked + runnable load
+				 * The runnable loads are close so take the
+				 * blocked load into account through avg_load.
 				 */
 				min_avg_load = avg_load;
 				idlest = group;
@@ -5509,15 +5508,21 @@ find_idlest_group(struct sched_domain *s
 	if (this_spare > task_util(p) / 2 &&
 	    imbalance_scale*this_spare > 100*most_spare)
 		return NULL;
-	else if (most_spare > task_util(p) / 2)
+
+	if (most_spare > task_util(p) / 2)
 		return most_spare_sg;
 
 skip_spare:
-	if (!idlest ||
-	    (min_runnable_load > (this_runnable_load + imbalance)) ||
-	    ((this_runnable_load < (min_runnable_load + imbalance)) &&
-			(100*this_avg_load < imbalance_scale*min_avg_load)))
+	if (!idlest)
+		return NULL;
+
+	if (min_runnable_load > (this_runnable_load + imbalance))
 		return NULL;
+
+	if ((this_runnable_load < (min_runnable_load + imbalance)) &&
+	     (100*this_avg_load < imbalance_scale*min_avg_load))
+		return NULL;
+
 	return idlest;
 }
 
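For reference, here is a stand-alone user-space sketch of the selection
rule those hunks implement (not the kernel function itself, just the
shape of the comparison): prefer the group with a clearly smaller
runnable load, and only when the runnable loads are within 'imbalance'
of each other let avg_load (runnable + blocked) break the tie via
'imbalance_scale'. The group names and the numeric values of imbalance
and imbalance_scale below are made up for illustration; in the patch
they are derived from the sched_domain, which is not shown in the
quoted hunks.

#include <stdio.h>

struct group_stats {
	const char *name;
	unsigned long runnable_load;	/* runnable load, capacity scaled */
	unsigned long avg_load;		/* runnable + blocked, capacity scaled */
};

int main(void)
{
	/* Hypothetical, already capacity-scaled figures. */
	struct group_stats groups[] = {
		{ "groupA", 2048, 3000 },
		{ "groupB", 1900, 2200 },
		{ "groupC", 2100, 1500 },
	};
	unsigned long imbalance = 190;		/* absolute runnable-load margin */
	unsigned long imbalance_scale = 112;	/* percentage margin for avg_load */
	unsigned long min_runnable_load = ~0UL;
	unsigned long min_avg_load = ~0UL;
	struct group_stats *idlest = NULL;
	unsigned int i;

	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
		struct group_stats *g = &groups[i];

		if (min_runnable_load > (g->runnable_load + imbalance)) {
			/* Clearly less runnable load: take this group. */
			min_runnable_load = g->runnable_load;
			min_avg_load = g->avg_load;
			idlest = g;
		} else if ((g->runnable_load < (min_runnable_load + imbalance)) &&
			   (100*min_avg_load > imbalance_scale*g->avg_load)) {
			/*
			 * Runnable loads are close: let blocked load
			 * (through avg_load) break the tie.  Only
			 * min_avg_load is updated, as in the patch.
			 */
			min_avg_load = g->avg_load;
			idlest = g;
		}
	}

	printf("idlest: %s\n", idlest ? idlest->name : "(none)");
	return 0;
}

With these made-up numbers it prints "idlest: groupC": groupC has more
runnable load than groupB, but the difference is within 'imbalance', so
its much lower avg_load (less blocked load) wins the tie -- exactly the
else-if branch the comment describes.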