From mboxrd@z Thu Jan  1 00:00:00 1970
Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201])
	(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
	(No client certificate requested)
	by smtp.subspace.kernel.org (Postfix) with ESMTPS id 632991863
	for <patches@lists.linux.dev>; Wed, 28 Dec 2022 15:10:00 +0000 (UTC)
Received: by smtp.kernel.org (Postfix) with ESMTPSA id D9842C433D2;
	Wed, 28 Dec 2022 15:09:59 +0000 (UTC)
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=linuxfoundation.org;
	s=korg; t=1672240200;
	bh=zNy7dK0/Oxb5jdNssSMEOqWdVoQ44y/IpBndXNKaHJM=;
	h=From:To:Cc:Subject:Date:In-Reply-To:References:From;
	b=bMdxuQd6ctaKYsaZPrb9Az/N0fjXF5FgXhasScBrxyFZgyHO4fpfmPzhD8SdGMuvQ
	 Vm5RuJpnVKx+heZygO3/zDPqxJzYnZA8WBT6BE+nPncKv9k1Bjbh2kpiqKy6ITj7Hq
	 q7TRWlqkiRbKt+LyuPVRGBnwRl222fSpBjwUxon0=
From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
To: stable@vger.kernel.org
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	patches@lists.linux.dev,
	Qais Yousef <qais.yousef@arm.com>,
	"Peter Zijlstra (Intel)" <peterz@infradead.org>,
	Sasha Levin <sashal@kernel.org>
Subject: [PATCH 6.0 0099/1073] sched/uclamp: Make asym_fits_capacity() use util_fits_cpu()
Date: Wed, 28 Dec 2022 15:28:07 +0100
Message-Id: <20221228144330.738360355@linuxfoundation.org>
X-Mailer: git-send-email 2.39.0
In-Reply-To: <20221228144328.162723588@linuxfoundation.org>
References: <20221228144328.162723588@linuxfoundation.org>
User-Agent: quilt/0.67
Precedence: bulk
X-Mailing-List: patches@lists.linux.dev
List-Id:
List-Subscribe:
List-Unsubscribe:
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

From: Qais Yousef <qais.yousef@arm.com>

[ Upstream commit a2e7f03ed28fce26c78b985f87913b6ce3accf9d ]

Use the new util_fits_cpu() to ensure migration margin and capacity
pressure are taken into account correctly when uclamp is being used,
otherwise we will fail to consider CPUs as fitting in scenarios where
they should.

s/asym_fits_capacity/asym_fits_cpu/ to better reflect what it does now.
Fixes: b4c9c9f15649 ("sched/fair: Prefer prev cpu in asymmetric wakeup path")
Signed-off-by: Qais Yousef <qais.yousef@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220804143609.515789-6-qais.yousef@arm.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/sched/fair.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1fe3f3b96251..cb4d47441a41 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6631,10 +6631,13 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 	return best_cpu;
 }
 
-static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
+static inline bool asym_fits_cpu(unsigned long util,
+				 unsigned long util_min,
+				 unsigned long util_max,
+				 int cpu)
 {
 	if (sched_asym_cpucap_active())
-		return fits_capacity(task_util, capacity_of(cpu));
+		return util_fits_cpu(util, util_min, util_max, cpu);
 
 	return true;
 }
@@ -6646,7 +6649,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 {
 	bool has_idle_core = false;
 	struct sched_domain *sd;
-	unsigned long task_util;
+	unsigned long task_util, util_min, util_max;
 	int i, recent_used_cpu;
 
 	/*
@@ -6655,7 +6658,9 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	 */
 	if (sched_asym_cpucap_active()) {
 		sync_entity_load_avg(&p->se);
-		task_util = uclamp_task_util(p);
+		task_util = task_util_est(p);
+		util_min = uclamp_eff_value(p, UCLAMP_MIN);
+		util_max = uclamp_eff_value(p, UCLAMP_MAX);
 	}
 
 	/*
@@ -6664,7 +6669,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	lockdep_assert_irqs_disabled();
 
 	if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
-	    asym_fits_capacity(task_util, target))
+	    asym_fits_cpu(task_util, util_min, util_max, target))
 		return target;
 
 	/*
@@ -6672,7 +6677,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	 */
 	if (prev != target && cpus_share_cache(prev, target) &&
 	    (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
-	    asym_fits_capacity(task_util, prev))
+	    asym_fits_cpu(task_util, util_min, util_max, prev))
 		return prev;
 
 	/*
@@ -6687,7 +6692,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	    in_task() &&
 	    prev == smp_processor_id() &&
 	    this_rq()->nr_running <= 1 &&
-	    asym_fits_capacity(task_util, prev)) {
+	    asym_fits_cpu(task_util, util_min, util_max, prev)) {
 		return prev;
 	}
 
@@ -6699,7 +6704,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	    cpus_share_cache(recent_used_cpu, target) &&
 	    (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
 	    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
-	    asym_fits_capacity(task_util, recent_used_cpu)) {
+	    asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
 		return recent_used_cpu;
 	}
 
-- 
2.35.1
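
To make the behavioural difference concrete, below is a minimal user-space
sketch (not kernel code) of the two checks. The old path clamped the task's
utilization first (uclamp_task_util()) and then applied the usual ~25%
migration margin to the clamped value; util_fits_cpu() instead keeps the
margin for the raw utilization but treats the uclamp hints as hard limits
compared against capacity without the margin. clamp_ul() and
util_fits_cpu_sketch() are hypothetical stand-ins, the capacity numbers are
made up, and the real util_fits_cpu() additionally accounts for thermal and
capacity pressure, which is omitted here.

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the kernel's fits_capacity() margin: utilization must stay
 * below roughly 80% of the CPU's capacity (1280/1024 == 1.25 headroom). */
static bool fits_capacity(unsigned long util, unsigned long capacity)
{
	return util * 1280 < capacity * 1024;
}

static unsigned long clamp_ul(unsigned long v, unsigned long lo,
			      unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Hypothetical, simplified stand-in for util_fits_cpu(): the margin
 * applies to the raw utilization only, while the uclamp hints are
 * compared against capacity as hard limits. */
static bool util_fits_cpu_sketch(unsigned long util, unsigned long util_min,
				 unsigned long util_max,
				 unsigned long capacity)
{
	bool fits = fits_capacity(util, capacity);

	/* A UCLAMP_MAX at or below the CPU's capacity means the capped
	 * task can always be accommodated there... */
	if (util_max <= capacity)
		fits = true;

	/* ...while a UCLAMP_MIN boost above the CPU's capacity means the
	 * requested performance level cannot be met. */
	if (util_min > capacity)
		fits = false;

	return fits;
}

int main(void)
{
	unsigned long little_cap = 160;	/* made-up little-CPU capacity */
	unsigned long util = 300, util_min = 0, util_max = 140;

	/* Old-style check: clamp first, then compare with the margin.
	 * clamp(300, 0, 140) = 140, and 140 does not fit 160 once the
	 * headroom is applied, so the little CPU is wrongly rejected. */
	printf("old check: %d\n",
	       fits_capacity(clamp_ul(util, util_min, util_max), little_cap));

	/* New-style check: UCLAMP_MAX is a hard cap compared without the
	 * margin, so the little CPU is correctly considered fitting. */
	printf("new check: %d\n",
	       util_fits_cpu_sketch(util, util_min, util_max, little_cap));
	return 0;
}

With these made-up numbers the sketch prints "old check: 0" and
"new check: 1": the UCLAMP_MAX-capped task was previously rejected from the
small CPU, which is exactly the "fail to consider CPUs as fitting in
scenarios where they should" case the commit message describes.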