* [PATCH v4 0/5] sched/fair: Minor SIS optimizations
@ 2022-09-07 11:19 Abel Wu
  2022-09-07 11:19 ` [PATCH v4 1/5] sched/fair: Remove redundant check in select_idle_smt() Abel Wu
                   ` (4 more replies)
  0 siblings, 5 replies; 11+ messages in thread
From: Abel Wu @ 2022-09-07 11:19 UTC (permalink / raw)
  To: Peter Zijlstra, Mel Gorman, Vincent Guittot
  Cc: Josh Don, Chen Yu, Yicong Yang, linux-kernel, Abel Wu

v4:
 - Format fixes
 - Collect Acked-Bys from Mel

v3:
 - Fix an uninitialised memory access issue which can happen when switching SIS_PROP

v2:
 - Removed the part ignoring SIS_UTIL when overloaded
 - Make SIS_PROP cleanup a separate patch
 - Collect Acked-Bys from Mel

Abel Wu (5):
  sched/fair: Remove redundant check in select_idle_smt()
  sched/fair: Avoid double search on same cpu
  sched/fair: Remove useless check in select_idle_core()
  sched/fair: Default to false in test_idle_cores()
  sched/fair: Cleanup for SIS_PROP

 kernel/sched/fair.c | 42 +++++++++++++++++++-----------------------
 1 file changed, 19 insertions(+), 23 deletions(-)

-- 
2.37.3



* [PATCH v4 1/5] sched/fair: Remove redundant check in select_idle_smt()
  2022-09-07 11:19 [PATCH v4 0/5] sched/fair: Minor SIS optimizations Abel Wu
@ 2022-09-07 11:19 ` Abel Wu
  2022-09-09  9:00   ` [tip: sched/core] " tip-bot2 for Abel Wu
  2022-09-07 11:19 ` [PATCH v4 2/5] sched/fair: Avoid double search on same cpu Abel Wu
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 11+ messages in thread
From: Abel Wu @ 2022-09-07 11:19 UTC (permalink / raw)
  To: Peter Zijlstra, Mel Gorman, Vincent Guittot
  Cc: Josh Don, Chen Yu, Yicong Yang, linux-kernel, Abel Wu, Mel Gorman

If two cpus share LLC cache, then the two cores they belong to
are also in the same LLC domain.

Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
Reviewed-by: Josh Don <joshdon@google.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
---
 kernel/sched/fair.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index efceb670e755..9657c7de5f57 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6350,14 +6350,11 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
 /*
  * Scan the local SMT mask for idle CPUs.
  */
-static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+static int select_idle_smt(struct task_struct *p, int target)
 {
 	int cpu;
 
-	for_each_cpu(cpu, cpu_smt_mask(target)) {
-		if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
-		    !cpumask_test_cpu(cpu, sched_domain_span(sd)))
-			continue;
+	for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
 		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
 			return cpu;
 	}
@@ -6381,7 +6378,7 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
 	return __select_idle_cpu(core, p);
 }
 
-static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+static inline int select_idle_smt(struct task_struct *p, int target)
 {
 	return -1;
 }
@@ -6615,7 +6612,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 		has_idle_core = test_idle_cores(target, false);
 
 		if (!has_idle_core && cpus_share_cache(prev, target)) {
-			i = select_idle_smt(p, sd, prev);
+			i = select_idle_smt(p, prev);
 			if ((unsigned int)i < nr_cpumask_bits)
 				return i;
 		}
-- 
2.37.3
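
The invariant this change relies on can be checked with a minimal user-space sketch. This is not kernel code: cpumasks are modeled as plain 64-bit words, the masks in main() are made-up examples, and new_scan() open-codes what for_each_cpu_and() does. Since the SMT mask of a cpu is always a subset of its LLC span, dropping the sched_domain_span() test cannot change which cpu the scan returns.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t cpumask_t;	/* model: one bit per cpu, up to 64 cpus */

/* old scan: test affinity and LLC span explicitly, as before the change */
static int old_scan(cpumask_t smt, cpumask_t llc, cpumask_t allowed, cpumask_t idle)
{
	for (int cpu = 0; cpu < 64; cpu++) {
		if (!(smt & (1ULL << cpu)))
			continue;
		if (!(allowed & (1ULL << cpu)) || !(llc & (1ULL << cpu)))
			continue;	/* the check the change removes */
		if (idle & (1ULL << cpu))
			return cpu;
	}
	return -1;
}

/* new scan: iterate smt & allowed, the for_each_cpu_and() equivalent */
static int new_scan(cpumask_t smt, cpumask_t allowed, cpumask_t idle)
{
	cpumask_t cand = smt & allowed;

	for (int cpu = 0; cpu < 64; cpu++) {
		if ((cand & (1ULL << cpu)) && (idle & (1ULL << cpu)))
			return cpu;
	}
	return -1;
}

int main(void)
{
	cpumask_t llc     = 0xff;	/* cpus 0-7 share the LLC           */
	cpumask_t smt     = 0x05;	/* target's SMT siblings: 0 and 2   */
	cpumask_t allowed = 0xf4;	/* task may run on cpus 2 and 4-7   */
	cpumask_t idle    = 0x06;	/* cpus 1 and 2 are idle            */

	/* both scans return 2, because smt is always a subset of llc */
	printf("old=%d new=%d\n", old_scan(smt, llc, allowed, idle),
	       new_scan(smt, allowed, idle));
	return 0;
}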



* [PATCH v4 2/5] sched/fair: Avoid double search on same cpu
  2022-09-07 11:19 [PATCH v4 0/5] sched/fair: Minor SIS optimizations Abel Wu
  2022-09-07 11:19 ` [PATCH v4 1/5] sched/fair: Remove redundant check in select_idle_smt() Abel Wu
@ 2022-09-07 11:19 ` Abel Wu
  2022-09-09  9:00   ` [tip: sched/core] " tip-bot2 for Abel Wu
  2022-09-07 11:19 ` [PATCH v4 3/5] sched/fair: Remove useless check in select_idle_core() Abel Wu
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 11+ messages in thread
From: Abel Wu @ 2022-09-07 11:19 UTC (permalink / raw)
  To: Peter Zijlstra, Mel Gorman, Vincent Guittot
  Cc: Josh Don, Chen Yu, Yicong Yang, linux-kernel, Abel Wu, Mel Gorman

The prev cpu is already checked at the beginning of SIS, and it is
unlikely to have become idle by the time select_idle_smt() would
check it again. So focus the scan on its SMT siblings instead.

Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
Reviewed-by: Josh Don <joshdon@google.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
---
 kernel/sched/fair.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9657c7de5f57..1ad79aaaaf93 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6355,6 +6355,8 @@ static int select_idle_smt(struct task_struct *p, int target)
 	int cpu;
 
 	for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
+		if (cpu == target)
+			continue;
 		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
 			return cpu;
 	}
-- 
2.37.3
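
A minimal user-space sketch of the call sequence this change trims, not kernel code: the sibling table, cpu numbers and idle states are made up, and the helper only borrows the kernel function name. The caller has already tested prev, so the sibling scan can skip it.

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 8

static bool idle[NR_CPUS];
static const int sibling[NR_CPUS] = { 1, 0, 3, 2, 5, 4, 7, 6 };	/* 2-way SMT pairs */

/* model of the patched select_idle_smt(): scan target's SMT mask, skip target */
static int select_idle_smt(int target)
{
	int cpus[2] = { target, sibling[target] };	/* cpu_smt_mask(target) */

	for (int i = 0; i < 2; i++) {
		int cpu = cpus[i];

		if (cpu == target)
			continue;	/* already tested by the caller */
		if (idle[cpu])
			return cpu;
	}
	return -1;
}

int main(void)
{
	int prev = 2;

	idle[3] = true;			/* only prev's sibling is idle */

	/* select_idle_sibling() tests prev first ... */
	if (idle[prev])
		return 0;

	/* ... prev is busy, so scan only its siblings, not prev again */
	printf("picked cpu %d\n", select_idle_smt(prev));
	return 0;
}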



* [PATCH v4 3/5] sched/fair: Remove useless check in select_idle_core()
  2022-09-07 11:19 [PATCH v4 0/5] sched/fair: Minor SIS optimizations Abel Wu
  2022-09-07 11:19 ` [PATCH v4 1/5] sched/fair: Remove redundant check in select_idle_smt() Abel Wu
  2022-09-07 11:19 ` [PATCH v4 2/5] sched/fair: Avoid double search on same cpu Abel Wu
@ 2022-09-07 11:19 ` Abel Wu
  2022-09-09  9:00   ` [tip: sched/core] " tip-bot2 for Abel Wu
  2022-09-07 11:19 ` [PATCH v4 4/5] sched/fair: Default to false in test_idle_cores() Abel Wu
  2022-09-07 11:20 ` [PATCH v4 5/5] sched/fair: Cleanup for SIS_PROP Abel Wu
  4 siblings, 1 reply; 11+ messages in thread
From: Abel Wu @ 2022-09-07 11:19 UTC (permalink / raw)
  To: Peter Zijlstra, Mel Gorman, Vincent Guittot
  Cc: Josh Don, Chen Yu, Yicong Yang, linux-kernel, Abel Wu, Mel Gorman

The function select_idle_core() only gets called when has_idle_cores
is true, which is only possible when sched_smt_present is enabled.

This change also aligns select_idle_core() with select_idle_smt() in
that the caller does the check when necessary.

Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
---
 kernel/sched/fair.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1ad79aaaaf93..03ce65068333 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6321,9 +6321,6 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
 	bool idle = true;
 	int cpu;
 
-	if (!static_branch_likely(&sched_smt_present))
-		return __select_idle_cpu(core, p);
-
 	for_each_cpu(cpu, cpu_smt_mask(core)) {
 		if (!available_idle_cpu(cpu)) {
 			idle = false;
-- 
2.37.3
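
A condensed user-space sketch, not kernel code, of the guard chain that makes the removed check dead: the helpers below only borrow the kernel names, and plain booleans stand in for the static branch and the shared-domain hint. The hint can only be set when SMT is present, and select_idle_core() is only reached when the hint is set.

#include <stdio.h>
#include <stdbool.h>

static bool sched_smt_present;	/* stand-in for the static branch */
static bool has_idle_cores;	/* stand-in for sds->has_idle_cores */

/* stand-in for __update_idle_core(): the hint is only ever set with SMT present */
static void update_idle_core(void)
{
	if (!sched_smt_present)
		return;
	has_idle_cores = true;
}

static int select_idle_core(int core)
{
	/*
	 * No sched_smt_present check needed here: this path is only taken
	 * when has_idle_cores is true, which already implies SMT is present.
	 */
	return core;
}

static int select_idle_cpu(int target)
{
	if (has_idle_cores)
		return select_idle_core(target);
	return -1;
}

int main(void)
{
	sched_smt_present = true;
	update_idle_core();
	printf("picked cpu %d\n", select_idle_cpu(4));
	return 0;
}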



* [PATCH v4 4/5] sched/fair: Default to false in test_idle_cores()
  2022-09-07 11:19 [PATCH v4 0/5] sched/fair: Minor SIS optimizations Abel Wu
                   ` (2 preceding siblings ...)
  2022-09-07 11:19 ` [PATCH v4 3/5] sched/fair: Remove useless check in select_idle_core() Abel Wu
@ 2022-09-07 11:19 ` Abel Wu
  2022-09-09  9:00   ` [tip: sched/core] " tip-bot2 for Abel Wu
  2022-09-07 11:20 ` [PATCH v4 5/5] sched/fair: Cleanup for SIS_PROP Abel Wu
  4 siblings, 1 reply; 11+ messages in thread
From: Abel Wu @ 2022-09-07 11:19 UTC (permalink / raw)
  To: Peter Zijlstra, Mel Gorman, Vincent Guittot
  Cc: Josh Don, Chen Yu, Yicong Yang, linux-kernel, Abel Wu, Mel Gorman

It is uncertain whether idle cores exist when the shared sched-domain
is not ready, so returning "no idle cores" is usually the sensible
default.

__update_idle_core() is the one exception: it checks the status of
this core and sets the hint in the shared sched-domain if necessary.
Its whole logic depends on the shared sched-domain being present, so
it can simply bail out early when that domain is not available.

The caller-supplied default is a little subtle, and as Josh pointed
out, the window in which the domain is not ready is transient anyway.
So remove the self-defined default value and simply return false,
which makes things clearer.

Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
Reviewed-by: Josh Don <joshdon@google.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
---
 kernel/sched/fair.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 03ce65068333..23b020c3d3a0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1588,11 +1588,11 @@ numa_type numa_classify(unsigned int imbalance_pct,
 
 #ifdef CONFIG_SCHED_SMT
 /* Forward declarations of select_idle_sibling helpers */
-static inline bool test_idle_cores(int cpu, bool def);
+static inline bool test_idle_cores(int cpu);
 static inline int numa_idle_core(int idle_core, int cpu)
 {
 	if (!static_branch_likely(&sched_smt_present) ||
-	    idle_core >= 0 || !test_idle_cores(cpu, false))
+	    idle_core >= 0 || !test_idle_cores(cpu))
 		return idle_core;
 
 	/*
@@ -6271,7 +6271,7 @@ static inline void set_idle_cores(int cpu, int val)
 		WRITE_ONCE(sds->has_idle_cores, val);
 }
 
-static inline bool test_idle_cores(int cpu, bool def)
+static inline bool test_idle_cores(int cpu)
 {
 	struct sched_domain_shared *sds;
 
@@ -6279,7 +6279,7 @@ static inline bool test_idle_cores(int cpu, bool def)
 	if (sds)
 		return READ_ONCE(sds->has_idle_cores);
 
-	return def;
+	return false;
 }
 
 /*
@@ -6295,7 +6295,7 @@ void __update_idle_core(struct rq *rq)
 	int cpu;
 
 	rcu_read_lock();
-	if (test_idle_cores(core, true))
+	if (test_idle_cores(core))
 		goto unlock;
 
 	for_each_cpu(cpu, cpu_smt_mask(core)) {
@@ -6367,9 +6367,9 @@ static inline void set_idle_cores(int cpu, int val)
 {
 }
 
-static inline bool test_idle_cores(int cpu, bool def)
+static inline bool test_idle_cores(int cpu)
 {
-	return def;
+	return false;
 }
 
 static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
@@ -6608,7 +6608,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 		return target;
 
 	if (sched_smt_active()) {
-		has_idle_core = test_idle_cores(target, false);
+		has_idle_core = test_idle_cores(target);
 
 		if (!has_idle_core && cpus_share_cache(prev, target)) {
 			i = select_idle_smt(p, prev);
-- 
2.37.3
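
A user-space sketch of the new default, not kernel code: the struct and helpers below are simplified stand-ins for sched_domain_shared, test_idle_cores() and __update_idle_core(). While the shared sched-domain is not attached, test_idle_cores() simply reports false, and the update path bails out early because it has nowhere to store the hint anyway.

#include <stdio.h>
#include <stdbool.h>

struct sched_domain_shared {	/* simplified model of the per-LLC shared state */
	bool has_idle_cores;
};

static struct sched_domain_shared *sd_llc_shared;	/* NULL until domains are built */

static bool test_idle_cores(void)
{
	struct sched_domain_shared *sds = sd_llc_shared;

	if (sds)
		return sds->has_idle_cores;

	return false;		/* the new fixed default, no caller-supplied 'def' */
}

static void update_idle_core(bool core_is_idle)
{
	struct sched_domain_shared *sds = sd_llc_shared;

	if (!sds)
		return;		/* nowhere to store the hint yet, bail out early */
	if (core_is_idle)
		sds->has_idle_cores = true;
}

int main(void)
{
	struct sched_domain_shared sds = { .has_idle_cores = false };

	printf("before domains: %d\n", test_idle_cores());	/* prints 0 */

	sd_llc_shared = &sds;		/* domains attached */
	update_idle_core(true);
	printf("after update:   %d\n", test_idle_cores());	/* prints 1 */
	return 0;
}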



* [PATCH v4 5/5] sched/fair: Cleanup for SIS_PROP
  2022-09-07 11:19 [PATCH v4 0/5] sched/fair: Minor SIS optimizations Abel Wu
                   ` (3 preceding siblings ...)
  2022-09-07 11:19 ` [PATCH v4 4/5] sched/fair: Default to false in test_idle_cores() Abel Wu
@ 2022-09-07 11:20 ` Abel Wu
  2022-09-09  9:00   ` [tip: sched/core] " tip-bot2 for Abel Wu
  4 siblings, 1 reply; 11+ messages in thread
From: Abel Wu @ 2022-09-07 11:20 UTC (permalink / raw)
  To: Peter Zijlstra, Mel Gorman, Vincent Guittot
  Cc: Josh Don, Chen Yu, Yicong Yang, linux-kernel, Abel Wu

The sched-domain of this cpu is only used for some heuristics when
SIS_PROP is enabled, so whether the local sd_llc is valid or not
should be irrelevant when SIS_PROP is disabled; in that case all we
care about is the target's sd_llc.

Access the local domain only when it is actually needed.

Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
Acked-by: Mel Gorman <mgorman@suse.de>
---
 kernel/sched/fair.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 23b020c3d3a0..7bad641faef9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6396,19 +6396,19 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
 	struct sched_domain_shared *sd_share;
 	struct rq *this_rq = this_rq();
 	int this = smp_processor_id();
-	struct sched_domain *this_sd;
+	struct sched_domain *this_sd = NULL;
 	u64 time = 0;
 
-	this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
-	if (!this_sd)
-		return -1;
-
 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 
 	if (sched_feat(SIS_PROP) && !has_idle_core) {
 		u64 avg_cost, avg_idle, span_avg;
 		unsigned long now = jiffies;
 
+		this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
+		if (!this_sd)
+			return -1;
+
 		/*
 		 * If we're busy, the assumption that the last idle period
 		 * predicts the future is flawed; age away the remaining
@@ -6462,7 +6462,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
 	if (has_idle_core)
 		set_idle_cores(target, false);
 
-	if (sched_feat(SIS_PROP) && !has_idle_core) {
+	if (sched_feat(SIS_PROP) && this_sd && !has_idle_core) {
 		time = cpu_clock(this) - time;
 
 		/*
-- 
2.37.3
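
A user-space sketch of the restructured control flow, not kernel code: the types, helpers and the "cpu 5 found" result are stand-ins. The local domain pointer is looked up only inside the SIS_PROP branch, and the accounting at the end is guarded on it, so the !SIS_PROP path never touches the local sd_llc.

#include <stdio.h>
#include <stdbool.h>

struct sched_domain {
	unsigned long avg_scan_cost;
};

static struct sched_domain local_llc;	/* stand-in for *this_cpu_ptr(&sd_llc) */
static bool sis_prop = true;		/* stand-in for sched_feat(SIS_PROP)   */

static struct sched_domain *local_sd_llc(void)
{
	return &local_llc;		/* may be NULL in the real kernel */
}

static unsigned long fake_clock(void)
{
	static unsigned long t;
	return t += 100;		/* stand-in for cpu_clock(this) */
}

static int select_idle_cpu(bool has_idle_core)
{
	struct sched_domain *this_sd = NULL;	/* only looked up when needed */
	unsigned long time = 0;
	int cpu;

	if (sis_prop && !has_idle_core) {
		this_sd = local_sd_llc();
		if (!this_sd)
			return -1;
		/* ... compute the scan depth from this_sd's averages ... */
		time = fake_clock();
	}

	/* ... scan for an idle cpu; pretend cpu 5 was found ... */
	cpu = 5;

	if (sis_prop && this_sd && !has_idle_core) {
		/* account the scan cost only if the SIS_PROP path was taken */
		this_sd->avg_scan_cost = fake_clock() - time;
	}

	return cpu;
}

int main(void)
{
	int cpu = select_idle_cpu(false);

	printf("picked cpu %d, scan cost %lu\n", cpu, local_llc.avg_scan_cost);
	return 0;
}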



* [tip: sched/core] sched/fair: Cleanup for SIS_PROP
  2022-09-07 11:20 ` [PATCH v4 5/5] sched/fair: Cleanup for SIS_PROP Abel Wu
@ 2022-09-09  9:00   ` tip-bot2 for Abel Wu
  0 siblings, 0 replies; 11+ messages in thread
From: tip-bot2 for Abel Wu @ 2022-09-09  9:00 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Abel Wu, Peter Zijlstra (Intel), Mel Gorman, x86, linux-kernel

The following commit has been merged into the sched/core branch of tip:

Commit-ID:     96c1c0cfe493a7ed549169a6f044bbb83e490fb5
Gitweb:        https://git.kernel.org/tip/96c1c0cfe493a7ed549169a6f044bbb83e490fb5
Author:        Abel Wu <wuyun.abel@bytedance.com>
AuthorDate:    Wed, 07 Sep 2022 19:20:00 +08:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 07 Sep 2022 21:53:47 +02:00

sched/fair: Cleanup for SIS_PROP

The sched-domain of this cpu is only used for some heuristics when
SIS_PROP is enabled, so whether the local sd_llc is valid or not
should be irrelevant when SIS_PROP is disabled; in that case all we
care about is the target's sd_llc.

Access the local domain only when it is actually needed.

Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Link: https://lore.kernel.org/r/20220907112000.1854-6-wuyun.abel@bytedance.com
---
 kernel/sched/fair.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 23b020c..7bad641 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6396,19 +6396,19 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool 
 	struct sched_domain_shared *sd_share;
 	struct rq *this_rq = this_rq();
 	int this = smp_processor_id();
-	struct sched_domain *this_sd;
+	struct sched_domain *this_sd = NULL;
 	u64 time = 0;
 
-	this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
-	if (!this_sd)
-		return -1;
-
 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 
 	if (sched_feat(SIS_PROP) && !has_idle_core) {
 		u64 avg_cost, avg_idle, span_avg;
 		unsigned long now = jiffies;
 
+		this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
+		if (!this_sd)
+			return -1;
+
 		/*
 		 * If we're busy, the assumption that the last idle period
 		 * predicts the future is flawed; age away the remaining
@@ -6462,7 +6462,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool 
 	if (has_idle_core)
 		set_idle_cores(target, false);
 
-	if (sched_feat(SIS_PROP) && !has_idle_core) {
+	if (sched_feat(SIS_PROP) && this_sd && !has_idle_core) {
 		time = cpu_clock(this) - time;
 
 		/*


* [tip: sched/core] sched/fair: Default to false in test_idle_cores()
  2022-09-07 11:19 ` [PATCH v4 4/5] sched/fair: Default to false in test_idle_cores() Abel Wu
@ 2022-09-09  9:00   ` tip-bot2 for Abel Wu
  0 siblings, 0 replies; 11+ messages in thread
From: tip-bot2 for Abel Wu @ 2022-09-09  9:00 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Abel Wu, Peter Zijlstra (Intel), Josh Don, Mel Gorman, x86, linux-kernel

The following commit has been merged into the sched/core branch of tip:

Commit-ID:     398ba2b0cc0a43964fe3d2dd19cb2a478f1f220b
Gitweb:        https://git.kernel.org/tip/398ba2b0cc0a43964fe3d2dd19cb2a478f1f220b
Author:        Abel Wu <wuyun.abel@bytedance.com>
AuthorDate:    Wed, 07 Sep 2022 19:19:59 +08:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 07 Sep 2022 21:53:47 +02:00

sched/fair: Default to false in test_idle_cores()

It is uncertain whether idle cores exist when the shared sched-domain
is not ready, so returning "no idle cores" is usually the sensible
default.

__update_idle_core() is the one exception: it checks the status of
this core and sets the hint in the shared sched-domain if necessary.
Its whole logic depends on the shared sched-domain being present, so
it can simply bail out early when that domain is not available.

The caller-supplied default is a little subtle, and as Josh pointed
out, the window in which the domain is not ready is transient anyway.
So remove the self-defined default value and simply return false,
which makes things clearer.

Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Josh Don <joshdon@google.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Link: https://lore.kernel.org/r/20220907112000.1854-5-wuyun.abel@bytedance.com
---
 kernel/sched/fair.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 03ce650..23b020c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1588,11 +1588,11 @@ numa_type numa_classify(unsigned int imbalance_pct,
 
 #ifdef CONFIG_SCHED_SMT
 /* Forward declarations of select_idle_sibling helpers */
-static inline bool test_idle_cores(int cpu, bool def);
+static inline bool test_idle_cores(int cpu);
 static inline int numa_idle_core(int idle_core, int cpu)
 {
 	if (!static_branch_likely(&sched_smt_present) ||
-	    idle_core >= 0 || !test_idle_cores(cpu, false))
+	    idle_core >= 0 || !test_idle_cores(cpu))
 		return idle_core;
 
 	/*
@@ -6271,7 +6271,7 @@ static inline void set_idle_cores(int cpu, int val)
 		WRITE_ONCE(sds->has_idle_cores, val);
 }
 
-static inline bool test_idle_cores(int cpu, bool def)
+static inline bool test_idle_cores(int cpu)
 {
 	struct sched_domain_shared *sds;
 
@@ -6279,7 +6279,7 @@ static inline bool test_idle_cores(int cpu, bool def)
 	if (sds)
 		return READ_ONCE(sds->has_idle_cores);
 
-	return def;
+	return false;
 }
 
 /*
@@ -6295,7 +6295,7 @@ void __update_idle_core(struct rq *rq)
 	int cpu;
 
 	rcu_read_lock();
-	if (test_idle_cores(core, true))
+	if (test_idle_cores(core))
 		goto unlock;
 
 	for_each_cpu(cpu, cpu_smt_mask(core)) {
@@ -6367,9 +6367,9 @@ static inline void set_idle_cores(int cpu, int val)
 {
 }
 
-static inline bool test_idle_cores(int cpu, bool def)
+static inline bool test_idle_cores(int cpu)
 {
-	return def;
+	return false;
 }
 
 static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
@@ -6608,7 +6608,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 		return target;
 
 	if (sched_smt_active()) {
-		has_idle_core = test_idle_cores(target, false);
+		has_idle_core = test_idle_cores(target);
 
 		if (!has_idle_core && cpus_share_cache(prev, target)) {
 			i = select_idle_smt(p, prev);


* [tip: sched/core] sched/fair: Remove useless check in select_idle_core()
  2022-09-07 11:19 ` [PATCH v4 3/5] sched/fair: Remove useless check in select_idle_core() Abel Wu
@ 2022-09-09  9:00   ` tip-bot2 for Abel Wu
  0 siblings, 0 replies; 11+ messages in thread
From: tip-bot2 for Abel Wu @ 2022-09-09  9:00 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Abel Wu, Peter Zijlstra (Intel), Mel Gorman, x86, linux-kernel

The following commit has been merged into the sched/core branch of tip:

Commit-ID:     8eeeed9c4a791f0d1f2ea830eb75a4246c117ae2
Gitweb:        https://git.kernel.org/tip/8eeeed9c4a791f0d1f2ea830eb75a4246c117ae2
Author:        Abel Wu <wuyun.abel@bytedance.com>
AuthorDate:    Wed, 07 Sep 2022 19:19:58 +08:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 07 Sep 2022 21:53:46 +02:00

sched/fair: Remove useless check in select_idle_core()

The function select_idle_core() only gets called when has_idle_cores
is true, which is only possible when sched_smt_present is enabled.

This change also aligns select_idle_core() with select_idle_smt() in
that the caller does the check when necessary.

Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Link: https://lore.kernel.org/r/20220907112000.1854-4-wuyun.abel@bytedance.com
---
 kernel/sched/fair.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1ad79aa..03ce650 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6321,9 +6321,6 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
 	bool idle = true;
 	int cpu;
 
-	if (!static_branch_likely(&sched_smt_present))
-		return __select_idle_cpu(core, p);
-
 	for_each_cpu(cpu, cpu_smt_mask(core)) {
 		if (!available_idle_cpu(cpu)) {
 			idle = false;


* [tip: sched/core] sched/fair: Avoid double search on same cpu
  2022-09-07 11:19 ` [PATCH v4 2/5] sched/fair: Avoid double search on same cpu Abel Wu
@ 2022-09-09  9:00   ` tip-bot2 for Abel Wu
  0 siblings, 0 replies; 11+ messages in thread
From: tip-bot2 for Abel Wu @ 2022-09-09  9:00 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Abel Wu, Peter Zijlstra (Intel), Josh Don, Mel Gorman, x86, linux-kernel

The following commit has been merged into the sched/core branch of tip:

Commit-ID:     b9bae70440d21e106fbc098803b5a190df65f2e0
Gitweb:        https://git.kernel.org/tip/b9bae70440d21e106fbc098803b5a190df65f2e0
Author:        Abel Wu <wuyun.abel@bytedance.com>
AuthorDate:    Wed, 07 Sep 2022 19:19:57 +08:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 07 Sep 2022 21:53:46 +02:00

sched/fair: Avoid double search on same cpu

The prev cpu is already checked at the beginning of SIS, and it is
unlikely to have become idle by the time select_idle_smt() would
check it again. So focus the scan on its SMT siblings instead.

Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Josh Don <joshdon@google.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Link: https://lore.kernel.org/r/20220907112000.1854-3-wuyun.abel@bytedance.com
---
 kernel/sched/fair.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9657c7d..1ad79aa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6355,6 +6355,8 @@ static int select_idle_smt(struct task_struct *p, int target)
 	int cpu;
 
 	for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
+		if (cpu == target)
+			continue;
 		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
 			return cpu;
 	}


* [tip: sched/core] sched/fair: Remove redundant check in select_idle_smt()
  2022-09-07 11:19 ` [PATCH v4 1/5] sched/fair: Remove redundant check in select_idle_smt() Abel Wu
@ 2022-09-09  9:00   ` tip-bot2 for Abel Wu
  0 siblings, 0 replies; 11+ messages in thread
From: tip-bot2 for Abel Wu @ 2022-09-09  9:00 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Abel Wu, Peter Zijlstra (Intel), Josh Don, Mel Gorman, x86, linux-kernel

The following commit has been merged into the sched/core branch of tip:

Commit-ID:     3e6efe87cd5ccabf0f1d4e3ef25881ca0fd337e7
Gitweb:        https://git.kernel.org/tip/3e6efe87cd5ccabf0f1d4e3ef25881ca0fd337e7
Author:        Abel Wu <wuyun.abel@bytedance.com>
AuthorDate:    Wed, 07 Sep 2022 19:19:56 +08:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 07 Sep 2022 21:53:46 +02:00

sched/fair: Remove redundant check in select_idle_smt()

If two cpus share LLC cache, then the two cores they belong to
are also in the same LLC domain.

Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Josh Don <joshdon@google.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Link: https://lore.kernel.org/r/20220907112000.1854-2-wuyun.abel@bytedance.com
---
 kernel/sched/fair.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index efceb67..9657c7d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6350,14 +6350,11 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
 /*
  * Scan the local SMT mask for idle CPUs.
  */
-static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+static int select_idle_smt(struct task_struct *p, int target)
 {
 	int cpu;
 
-	for_each_cpu(cpu, cpu_smt_mask(target)) {
-		if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
-		    !cpumask_test_cpu(cpu, sched_domain_span(sd)))
-			continue;
+	for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
 		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
 			return cpu;
 	}
@@ -6381,7 +6378,7 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
 	return __select_idle_cpu(core, p);
 }
 
-static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+static inline int select_idle_smt(struct task_struct *p, int target)
 {
 	return -1;
 }
@@ -6615,7 +6612,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 		has_idle_core = test_idle_cores(target, false);
 
 		if (!has_idle_core && cpus_share_cache(prev, target)) {
-			i = select_idle_smt(p, sd, prev);
+			i = select_idle_smt(p, prev);
 			if ((unsigned int)i < nr_cpumask_bits)
 				return i;
 		}


