From: vpillai <vpillai@digitalocean.com>
To: Nishanth Aravamudan <naravamudan@digitalocean.com>,
Julien Desfossez <jdesfossez@digitalocean.com>,
Peter Zijlstra <peterz@infradead.org>,
Tim Chen <tim.c.chen@linux.intel.com>,
mingo@kernel.org, tglx@linutronix.de, pjt@google.com,
torvalds@linux-foundation.org
Cc: linux-kernel@vger.kernel.org, fweisbec@gmail.com,
keescook@chromium.org, kerrnel@google.com,
Phil Auld <pauld@redhat.com>, Aaron Lu <aaron.lwe@gmail.com>,
Aubrey Li <aubrey.intel@gmail.com>,
aubrey.li@linux.intel.com,
Valentin Schneider <valentin.schneider@arm.com>,
Mel Gorman <mgorman@techsingularity.net>,
Pawan Gupta <pawan.kumar.gupta@linux.intel.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Joel Fernandes <joelaf@google.com>,
joel@joelfernandes.org,
Vineeth Remanan Pillai <vpillai@digitalocean.com>
Subject: [RFC PATCH 03/13] sched: Core-wide rq->lock
Date: Wed, 4 Mar 2020 16:59:53 +0000
Message-ID: <855831b59e1b3774b11c3e33050eac4cc4639f06.1583332765.git.vpillai@digitalocean.com>
In-Reply-To: <cover.1583332764.git.vpillai@digitalocean.com>
From: Peter Zijlstra <peterz@infradead.org>
Introduce the basic infrastructure for a core-wide rq->lock: each runqueue gains a pointer to its core's runqueue, and rq_lockp() resolves to that core runqueue's lock whenever core scheduling is enabled, so all SMT siblings of a core serialize on the same lock.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Julien Desfossez <jdesfossez@digitalocean.com>
Signed-off-by: Vineeth Remanan Pillai <vpillai@digitalocean.com>
---
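A note on intended usage (an illustrative sketch only, not part of this patch):
with the rq_lockp() wrapper introduced in patch 01/13, call sites take and
release the runqueue lock through the indirection added here. The helper below
is hypothetical; cpu_rq(), rq_lockp() and the raw spinlock calls are the real
interfaces, and IRQ save/restore is elided for brevity:

  /* Hypothetical helper, for illustration only. */
  static void lock_cpu_rq(int cpu)
  {
          struct rq *rq = cpu_rq(cpu);

          /*
           * With core scheduling disabled, rq_lockp() returns rq's own
           * lock; with it enabled, every SMT sibling of the core resolves
           * to the same (core rq's) lock, so the lock and unlock below
           * always hit the _same_ spinlock.
           */
          raw_spin_lock(rq_lockp(rq));
          /* ... operate on rq ... */
          raw_spin_unlock(rq_lockp(rq));
  }
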
kernel/Kconfig.preempt | 6 +++
kernel/sched/core.c | 113 ++++++++++++++++++++++++++++++++++++++++-
kernel/sched/sched.h | 31 +++++++++++
3 files changed, 148 insertions(+), 2 deletions(-)
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index bf82259cff96..577c288e81e5 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -80,3 +80,9 @@ config PREEMPT_COUNT
config PREEMPTION
bool
select PREEMPT_COUNT
+
+config SCHED_CORE
+ bool
+ default y
+ depends on SCHED_SMT
+
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 28ba9b56dd8a..ba17ff8a8663 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -73,6 +73,70 @@ __read_mostly int scheduler_running;
*/
int sysctl_sched_rt_runtime = 950000;
+#ifdef CONFIG_SCHED_CORE
+
+DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
+
+/*
+ * The static-key + stop-machine variable are needed such that:
+ *
+ * spin_lock(rq_lockp(rq));
+ * ...
+ * spin_unlock(rq_lockp(rq));
+ *
+ * ends up locking and unlocking the _same_ lock, and all CPUs
+ * always agree on what rq has what lock.
+ *
+ * XXX entirely possible to selectively enable cores, don't bother for now.
+ */
+static int __sched_core_stopper(void *data)
+{
+ bool enabled = !!(unsigned long)data;
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ cpu_rq(cpu)->core_enabled = enabled;
+
+ return 0;
+}
+
+static DEFINE_MUTEX(sched_core_mutex);
+static int sched_core_count;
+
+static void __sched_core_enable(void)
+{
+ // XXX verify there are no cookie tasks (yet)
+
+ static_branch_enable(&__sched_core_enabled);
+ stop_machine(__sched_core_stopper, (void *)true, NULL);
+}
+
+static void __sched_core_disable(void)
+{
+ // XXX verify there are no cookie tasks (left)
+
+ stop_machine(__sched_core_stopper, (void *)false, NULL);
+ static_branch_disable(&__sched_core_enabled);
+}
+
+void sched_core_get(void)
+{
+ mutex_lock(&sched_core_mutex);
+ if (!sched_core_count++)
+ __sched_core_enable();
+ mutex_unlock(&sched_core_mutex);
+}
+
+void sched_core_put(void)
+{
+ mutex_lock(&sched_core_mutex);
+ if (!--sched_core_count)
+ __sched_core_disable();
+ mutex_unlock(&sched_core_mutex);
+}
+
+#endif /* CONFIG_SCHED_CORE */
+
/*
* __task_rq_lock - lock the rq @p resides on.
*/
@@ -6400,8 +6464,15 @@ int sched_cpu_activate(unsigned int cpu)
/*
* When going up, increment the number of cores with SMT present.
*/
- if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
static_branch_inc_cpuslocked(&sched_smt_present);
+#ifdef CONFIG_SCHED_CORE
+ if (static_branch_unlikely(&__sched_core_enabled)) {
+ rq->core_enabled = true;
+ }
+#endif
+ }
+
#endif
set_cpu_active(cpu, true);
@@ -6447,8 +6518,16 @@ int sched_cpu_deactivate(unsigned int cpu)
/*
* When going down, decrement the number of cores with SMT present.
*/
- if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
+#ifdef CONFIG_SCHED_CORE
+ struct rq *rq = cpu_rq(cpu);
+ if (static_branch_unlikely(&__sched_core_enabled)) {
+ rq->core_enabled = false;
+ }
+#endif
static_branch_dec_cpuslocked(&sched_smt_present);
+
+ }
#endif
if (!sched_smp_initialized)
@@ -6473,6 +6552,28 @@ static void sched_rq_cpu_starting(unsigned int cpu)
int sched_cpu_starting(unsigned int cpu)
{
+#ifdef CONFIG_SCHED_CORE
+ const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+ struct rq *rq, *core_rq = NULL;
+ int i;
+
+ for_each_cpu(i, smt_mask) {
+ rq = cpu_rq(i);
+ if (rq->core && rq->core == rq)
+ core_rq = rq;
+ }
+
+ if (!core_rq)
+ core_rq = cpu_rq(cpu);
+
+ for_each_cpu(i, smt_mask) {
+ rq = cpu_rq(i);
+
+ WARN_ON_ONCE(rq->core && rq->core != core_rq);
+ rq->core = core_rq;
+ }
+#endif /* CONFIG_SCHED_CORE */
+
sched_rq_cpu_starting(cpu);
sched_tick_start(cpu);
return 0;
@@ -6501,6 +6602,9 @@ int sched_cpu_dying(unsigned int cpu)
update_max_interval();
nohz_balance_exit_idle(rq);
hrtick_clear(rq);
+#ifdef CONFIG_SCHED_CORE
+ rq->core = NULL;
+#endif
return 0;
}
#endif
@@ -6695,6 +6799,11 @@ void __init sched_init(void)
#endif /* CONFIG_SMP */
hrtick_rq_init(rq);
atomic_set(&rq->nr_iowait, 0);
+
+#ifdef CONFIG_SCHED_CORE
+ rq->core = NULL;
+ rq->core_enabled = 0;
+#endif
}
set_load_weight(&init_task, false);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a8335e3078ab..a3941b2ee29e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -999,6 +999,12 @@ struct rq {
/* Must be inspected within a rcu lock section */
struct cpuidle_state *idle_state;
#endif
+
+#ifdef CONFIG_SCHED_CORE
+ /* per rq */
+ struct rq *core;
+ unsigned int core_enabled;
+#endif
};
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1026,11 +1032,36 @@ static inline int cpu_of(struct rq *rq)
#endif
}
+#ifdef CONFIG_SCHED_CORE
+DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);
+
+static inline bool sched_core_enabled(struct rq *rq)
+{
+ return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
+}
+
+static inline raw_spinlock_t *rq_lockp(struct rq *rq)
+{
+ if (sched_core_enabled(rq))
+ return &rq->core->__lock;
+
+ return &rq->__lock;
+}
+
+#else /* !CONFIG_SCHED_CORE */
+
+static inline bool sched_core_enabled(struct rq *rq)
+{
+ return false;
+}
+
static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
return &rq->__lock;
}
+#endif /* CONFIG_SCHED_CORE */
+
#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);
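
Usage note (not part of this patch): the refcounted interface above is meant to
be bracketed by whatever feature actually needs core scheduling; the first
sched_core_get() enables the static key and flips core_enabled on every online
runqueue under stop_machine(), and the last sched_core_put() undoes it. A
hypothetical consumer would look roughly like:

  /* Hypothetical consumer, for illustration only. */
  static void my_feature_start(void)
  {
          sched_core_get();       /* first caller runs __sched_core_enable() */
          /* ... e.g. tag the tasks of interest (later patches) ... */
  }

  static void my_feature_stop(void)
  {
          /* ... untag tasks first ... */
          sched_core_put();       /* last caller runs __sched_core_disable() */
  }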
--
2.17.1