From: Peter Zijlstra <peterz@infradead.org>
To: mgorman@techsingularity.net, vincent.guittot@linaro.org
Cc: peterz@infradead.org, linux-kernel@vger.kernel.org,
	aubrey.li@linux.intel.com, mingo@redhat.com,
	juri.lelli@redhat.com, valentin.schneider@arm.com,
	qais.yousef@arm.com, dietmar.eggemann@arm.com,
	rostedt@goodmis.org, bsegall@google.com,
	tim.c.chen@linux.intel.com, benbjiang@gmail.com
Subject: [RFC][PATCH 5/5] sched/fair: SIS_PROP the idle core scan
Date: Mon, 14 Dec 2020 17:48:27 +0100
Message-ID: <20201214170018.126628741@infradead.org>
In-Reply-To: <20201214164822.402812729@infradead.org>

Further unify the new select_idle_cpu() loop: remove the 'smt'
selection code and unconditionally use SIS_PROP, even for idle core
searches.

This effectively brings back the behaviour of select_idle_smt(), which
was removed a few patches ago, since the unified loop always iterates
the target core.
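
For illustration only, and not part of the patch: a minimal userspace
sketch of the SIS_PROP bound that now also governs the idle core scan.
The idea is that the scan depth grows with the predicted idle time of
this CPU divided by the average per-CPU scan cost. The constants (512,
4) mirror the kernel's fuzz factor and minimum depth, but the function
and its parameters are assumptions for the sketch, not the kernel code.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for this_rq()->avg_idle and
 * this_sd->avg_scan_cost; both in nanoseconds. */
static int sis_prop_scan_limit(int span_weight, uint64_t avg_idle_ns,
			       uint64_t avg_scan_cost_ns)
{
	uint64_t avg_idle = avg_idle_ns / 512;	/* large fuzz factor */
	uint64_t avg_cost = avg_scan_cost_ns + 1;
	uint64_t span_avg = (uint64_t)span_weight * avg_idle;

	/* Scan more CPUs the longer this CPU is expected to stay idle. */
	if (span_avg > 4 * avg_cost)
		return (int)(span_avg / avg_cost);

	return 4;	/* minimum scan depth */
}

int main(void)
{
	/* e.g. a 16-CPU LLC, 200us predicted idle, 600ns scan cost */
	printf("scan limit: %d CPUs\n",
	       sis_prop_scan_limit(16, 200000, 600));
	return 0;
}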

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/linux/sched/topology.h |    1 
 kernel/sched/fair.c            |   90 +++--------------------------------------
 kernel/sched/idle.c            |    1 
 kernel/sched/sched.h           |   13 -----
 4 files changed, 7 insertions(+), 98 deletions(-)

--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -73,7 +73,6 @@ struct sched_group;
 struct sched_domain_shared {
 	atomic_t	ref;
 	atomic_t	nr_busy_cpus;
-	int		has_idle_cores;
 };
 
 struct sched_domain {
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1581,11 +1581,9 @@ numa_type numa_classify(unsigned int imb
 
 #ifdef CONFIG_SCHED_SMT
 /* Forward declarations of select_idle_sibling helpers */
-static inline bool test_idle_cores(int cpu, bool def);
 static inline int numa_idle_core(int idle_core, int cpu)
 {
-	if (!static_branch_likely(&sched_smt_present) ||
-	    idle_core >= 0 || !test_idle_cores(cpu, false))
+	if (!static_branch_likely(&sched_smt_present) || idle_core >= 0)
 		return idle_core;
 
 	/*
@@ -6020,60 +6018,6 @@ EXPORT_SYMBOL_GPL(sched_smt_present);
 
 int sched_smt_weight = 1;
 
-static inline void set_idle_cores(int cpu, int val)
-{
-	struct sched_domain_shared *sds;
-
-	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
-	if (sds)
-		WRITE_ONCE(sds->has_idle_cores, val);
-}
-
-static inline bool test_idle_cores(int cpu, bool def)
-{
-	struct sched_domain_shared *sds;
-
-	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
-	if (sds)
-		return READ_ONCE(sds->has_idle_cores);
-
-	return def;
-}
-
-/*
- * Scans the local SMT mask to see if the entire core is idle, and records this
- * information in sd_llc_shared->has_idle_cores.
- *
- * Since SMT siblings share all cache levels, inspecting this limited remote
- * state should be fairly cheap.
- */
-void __update_idle_core(struct rq *rq)
-{
-	int core = cpu_of(rq);
-	int cpu;
-
-	rcu_read_lock();
-	if (test_idle_cores(core, true))
-		goto unlock;
-
-	for_each_cpu(cpu, cpu_smt_mask(core)) {
-		if (cpu == core)
-			continue;
-
-		if (!available_idle_cpu(cpu))
-			goto unlock;
-	}
-
-	set_idle_cores(core, 1);
-unlock:
-	rcu_read_unlock();
-}
-
-/*
- * Scan the entire LLC domain for idle cores; this dynamically switches off if
- * there are no idle cores left in the system; tracked through
- * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
- */
 static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
 {
 	bool idle = true;
@@ -6109,15 +6053,6 @@ static int select_idle_core(struct task_
 
 #define sched_smt_weight	1
 
-static inline void set_idle_cores(int cpu, int val)
-{
-}
-
-static inline bool test_idle_cores(int cpu, bool def)
-{
-	return def;
-}
-
 static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
 {
 	return __select_idle_cpu(p, core, cpus, idle_cpu);
@@ -6136,7 +6071,6 @@ static int select_idle_cpu(struct task_s
 {
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 	int i, cpu, idle_cpu = -1, loops = 1, nr = INT_MAX;
-	bool smt = test_idle_cores(target, false);
 	int this = smp_processor_id();
 	struct sched_domain *this_sd;
 	u64 time;
@@ -6147,7 +6081,7 @@ static int select_idle_cpu(struct task_s
 
 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 
-	if (sched_feat(SIS_PROP) && !smt) {
+	if (sched_feat(SIS_PROP)) {
 		u64 avg_cost, avg_idle, span_avg;
 
 		/*
@@ -6169,17 +6103,10 @@ static int select_idle_cpu(struct task_s
 	}
 
 	for_each_cpu_wrap(cpu, cpus, target) {
-		if (smt) {
-			i = select_idle_core(p, cpu, cpus, &idle_cpu);
-			if ((unsigned)i < nr_cpumask_bits)
-				return i;
-
-		} else {
-			i = __select_idle_cpu(p, cpu, cpus, &idle_cpu);
-			if ((unsigned)i < nr_cpumask_bits) {
-				idle_cpu = i;
-				break;
-			}
+		i = select_idle_core(p, cpu, cpus, &idle_cpu);
+		if ((unsigned)i < nr_cpumask_bits) {
+			idle_cpu = i;
+			break;
 		}
 
 		if (loops >= nr)
@@ -6188,10 +6115,7 @@ static int select_idle_cpu(struct task_s
 		loops++;
 	}
 
-	if (smt)
-		set_idle_cores(this, false);
-
-	if (sched_feat(SIS_PROP) && !smt) {
+	if (sched_feat(SIS_PROP)) {
 		time = cpu_clock(this) - time;
 		time = div_u64(time, loops);
 		update_avg(&this_sd->avg_scan_cost, time);
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -428,7 +428,6 @@ static void put_prev_task_idle(struct rq
 
 static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
 {
-	update_idle_core(rq);
 	schedstat_inc(rq->sched_goidle);
 }
 
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1099,19 +1099,6 @@ static inline bool is_migration_disabled
 #endif
 }
 
-#ifdef CONFIG_SCHED_SMT
-extern void __update_idle_core(struct rq *rq);
-
-static inline void update_idle_core(struct rq *rq)
-{
-	if (static_branch_unlikely(&sched_smt_present))
-		__update_idle_core(rq);
-}
-
-#else
-static inline void update_idle_core(struct rq *rq) { }
-#endif
-
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
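
As a reading aid, and again not part of the patch: a condensed,
compilable model of the scan this series ends up with. Here
select_idle_core() reports a fully idle core when it finds one and
remembers any idle sibling in *idle_cpu as a fallback, while a single
SIS_PROP budget ('nr') now caps the core scan as well. Arrays stand in
for cpumasks; all names and numbers are illustrative assumptions.

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS		8
#define SMT_SIBLINGS	2

static bool cpu_is_idle[NR_CPUS] = {
	false, false, true, true, false, true, false, false,
};

/* Return the first CPU of an entirely idle core, or -1; remember any
 * idle sibling seen along the way as a fallback. */
static int select_idle_core(int core, int *idle_cpu)
{
	int first = (core / SMT_SIBLINGS) * SMT_SIBLINGS;
	bool whole_core_idle = true;

	for (int cpu = first; cpu < first + SMT_SIBLINGS; cpu++) {
		if (cpu_is_idle[cpu])
			*idle_cpu = cpu;
		else
			whole_core_idle = false;
	}

	return whole_core_idle ? first : -1;
}

static int select_idle_cpu(int target, int nr)
{
	int idle_cpu = -1;
	int core = target;

	/* One SIS_PROP budget covers the idle core scan too. */
	for (int loops = 1; loops <= nr; loops++) {
		int i = select_idle_core(core, &idle_cpu);
		if (i >= 0)
			return i;		/* fully idle core */
		core = (core + SMT_SIBLINGS) % NR_CPUS;
	}

	return idle_cpu;			/* best-effort idle sibling */
}

int main(void)
{
	/* CPUs 2,3 form an idle core; CPU 5 is a lone idle sibling. */
	printf("picked CPU %d\n", select_idle_cpu(0, 4));
	return 0;
}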




Thread overview: 31+ messages
2020-12-14 16:48 [RFC][PATCH 0/5] select_idle_sibling() wreckage Peter Zijlstra
2020-12-14 16:48 ` [RFC][PATCH 1/5] sched/fair: Fix select_idle_cpu()s cost accounting Peter Zijlstra
2020-12-15  3:36   ` Li, Aubrey
2020-12-15  7:59     ` Peter Zijlstra
2020-12-15 11:45       ` Mel Gorman
2020-12-15 12:13       ` Li, Aubrey
2021-01-08 10:27       ` Mel Gorman
2021-01-08 13:01         ` Qais Yousef
2021-01-08 13:47           ` Mel Gorman
2021-01-08 13:41         ` Vincent Guittot
2021-01-08 14:40           ` Mel Gorman
2021-01-08 15:10             ` Vincent Guittot
2021-01-08 16:14               ` Mel Gorman
2021-01-11 14:36                 ` Vincent Guittot
2021-01-11 15:58                   ` Mel Gorman
2021-01-08 19:45               ` Peter Zijlstra
2021-01-09 14:12                 ` Mel Gorman
2021-01-11 14:39                 ` Vincent Guittot
2021-01-08 19:49               ` Peter Zijlstra
2021-01-11 14:52                 ` Vincent Guittot
2021-01-08 20:21         ` Peter Zijlstra
2021-01-09 13:59           ` Mel Gorman
2020-12-14 16:48 ` [RFC][PATCH 2/5] sched/fair: Make select_idle_cpu() proportional to cores Peter Zijlstra
2020-12-23 13:31   ` Vincent Guittot
2020-12-14 16:48 ` [RFC][PATCH 3/5] sched/fair: Remove select_idle_smt() Peter Zijlstra
2020-12-14 16:48 ` [RFC][PATCH 4/5] sched/fair: Merge select_idle_core/cpu() Peter Zijlstra
2020-12-14 16:48 ` Peter Zijlstra [this message]
2020-12-16 12:59 ` [RFC][PATCH 0/5] select_idle_sibling() wreckage Li, Aubrey
2020-12-16 18:07   ` Vincent Guittot
2020-12-23 13:23     ` Vincent Guittot
2021-01-04 15:40       ` Mel Gorman
