From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@kernel.org>,
rt@linutronix.de
Subject: [patch 07/15] sched/hotplug: Convert cpu_[in]active notifiers to state machine
Date: Thu, 10 Mar 2016 12:04:40 -0000 [thread overview]
Message-ID: <20160310120025.091204363@linutronix.de> (raw)
In-Reply-To: 20160310115406.940706476@linutronix.de
[-- Attachment #1: sched_hotplug__Convert_cpu__in_active_notifiers_to_state_machine.patch --]
[-- Type: text/plain, Size: 6029 bytes --]
Now that we have reduced everything to single notifiers, it's simple to move
them into the hotplug state machine space.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/cpu.h | 12 --------
include/linux/cpuhotplug.h | 1
include/linux/sched.h | 2 +
kernel/cpu.c | 8 ++++-
kernel/sched/core.c | 67 ++++++++++++++-------------------------------
5 files changed, 30 insertions(+), 60 deletions(-)
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -59,18 +59,6 @@ struct notifier_block;
* CPU notifier priorities.
*/
enum {
- /*
- * SCHED_ACTIVE marks a cpu which is coming up active during
- * CPU_ONLINE and CPU_DOWN_FAILED and must be the first notifier. Is
- * also cpuset according to cpu_active mask right after activating the
- * cpu. During CPU_DOWN_PREPARE, SCHED_INACTIVE reversed the operation.
- *
- * This ordering guarantees consistent cpu_active mask and
- * migration behavior to all cpu notifiers.
- */
- CPU_PRI_SCHED_ACTIVE = INT_MAX,
- CPU_PRI_SCHED_INACTIVE = INT_MIN,
-
/* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
CPU_PRI_MIGRATION = 10,
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -13,6 +13,7 @@ enum cpuhp_state {
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,
CPUHP_AP_ONLINE_IDLE,
+ CPUHP_AP_ACTIVE,
CPUHP_AP_SMPBOOT_THREADS,
CPUHP_AP_NOTIFY_ONLINE,
CPUHP_AP_ONLINE_DYN,
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -374,6 +374,8 @@ extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);
extern int sched_cpu_starting(unsigned int cpu);
+extern int sched_cpu_activate(unsigned int cpu);
+extern int sched_cpu_deactivate(unsigned int cpu);
extern void sched_show_task(struct task_struct *p);
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -904,8 +904,6 @@ void cpuhp_online_idle(enum cpuhp_state
st->state = CPUHP_AP_ONLINE_IDLE;
- /* The cpu is marked online, set it active now */
- set_cpu_active(cpu, true);
/* Unpark the stopper thread and the hotplug thread of this cpu */
stop_machine_unpark(cpu);
kthread_unpark(st->thread);
@@ -1240,6 +1238,12 @@ static struct cpuhp_step cpuhp_ap_states
[CPUHP_AP_ONLINE] = {
.name = "ap:online",
},
+ /* First state is scheduler control. Interrupts are enabled */
+ [CPUHP_AP_ACTIVE] = {
+ .name = "sched:active",
+ .startup = sched_cpu_activate,
+ .teardown = sched_cpu_deactivate,
+ },
/* Handle smpboot threads park/unpark */
[CPUHP_AP_SMPBOOT_THREADS] = {
.name = "smpboot:threads",
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6867,9 +6867,6 @@ static void sched_domains_numa_masks_set
int node = cpu_to_node(cpu);
int i, j;
- if (!sched_smp_initialized)
- return;
-
for (i = 0; i < sched_domains_numa_levels; i++) {
for (j = 0; j < nr_node_ids; j++) {
if (node_distance(j, node) <= sched_domains_numa_distance[i])
@@ -6882,9 +6879,6 @@ static void sched_domains_numa_masks_cle
{
int i, j;
- if (!sched_smp_initialized)
- return;
-
for (i = 0; i < sched_domains_numa_levels; i++) {
for (j = 0; j < nr_node_ids; j++)
cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
@@ -7284,12 +7278,9 @@ static int num_cpus_frozen; /* used to m
* If we come here as part of a suspend/resume, don't touch cpusets because we
* want to restore it back to its original state upon resume anyway.
*/
-static void cpuset_cpu_active(bool frozen)
+static void cpuset_cpu_active(void)
{
- if (!sched_smp_initialized)
- return;
-
- if (frozen) {
+ if (cpuhp_tasks_frozen) {
/*
* num_cpus_frozen tracks how many CPUs are involved in suspend
* resume sequence. As long as this is not the last online
@@ -7310,17 +7301,14 @@ static void cpuset_cpu_active(bool froze
cpuset_update_active_cpus(true);
}
-static int cpuset_cpu_inactive(unsigned int cpu, bool frozen)
+static int cpuset_cpu_inactive(unsigned int cpu)
{
unsigned long flags;
struct dl_bw *dl_b;
bool overflow;
int cpus;
- if (!sched_smp_initialized)
- return 0;
-
- if (!frozen) {
+ if (!cpuhp_tasks_frozen) {
rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
@@ -7341,42 +7329,33 @@ static int cpuset_cpu_inactive(unsigned
return 0;
}
-static int sched_cpu_active(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+int sched_cpu_activate(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
+ set_cpu_active(cpu, true);
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_FAILED:
- case CPU_ONLINE:
- set_cpu_active(cpu, true);
+ if (sched_smp_initialized) {
sched_domains_numa_masks_set(cpu);
- cpuset_cpu_active(action & CPU_TASKS_FROZEN);
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
+ cpuset_cpu_active();
}
+ return 0;
}
-static int sched_cpu_inactive(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+int sched_cpu_deactivate(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
int ret;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_PREPARE:
- set_cpu_active(cpu, false);
- ret = cpuset_cpu_inactive(cpu, action & CPU_TASKS_FROZEN);
- if (ret) {
- set_cpu_active(cpu, true);
- return notifier_from_errno(ret);
- }
- sched_domains_numa_masks_clear(cpu);
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
+ set_cpu_active(cpu, false);
+
+ if (!sched_smp_initialized)
+ return 0;
+
+ ret = cpuset_cpu_inactive(cpu);
+ if (ret) {
+ set_cpu_active(cpu, true);
+ return ret;
}
+ sched_domains_numa_masks_clear(cpu);
+ return 0;
}
int sched_cpu_starting(unsigned int cpu)
@@ -7430,10 +7409,6 @@ static int __init migration_init(void)
migration_call(&migration_notifier, CPU_ONLINE, cpu);
register_cpu_notifier(&migration_notifier);
- /* Register cpu active notifiers */
- cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
- cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
-
return 0;
}
early_initcall(migration_init);
next prev parent reply other threads:[~2016-03-10 12:08 UTC|newest]
Thread overview: 38+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-03-10 12:04 [patch 00/15] cpu/hotplug: Convert scheduler to hotplug state machine Thomas Gleixner
2016-03-10 12:04 ` [patch 01/15] cpu/hotplug: Document states better Thomas Gleixner
2016-03-10 12:04 ` [patch 03/15] sched: Make set_cpu_rq_start_time() a built in hotplug state Thomas Gleixner
2016-03-10 12:04 ` [patch 05/15] sched: Consolidate the notifier maze Thomas Gleixner
2016-03-10 12:04 ` [patch 04/15] sched: Allow hotplug notifiers to be setup early Thomas Gleixner
2016-03-10 12:04 ` [patch 06/15] sched: Move sched_domains_numa_masks_clear() to DOWN_PREPARE Thomas Gleixner
2016-03-10 12:04 ` Thomas Gleixner [this message]
2016-03-10 12:04 ` [patch 08/15] sched, hotplug: Move sync_rcu to be with set_cpu_active(false) Thomas Gleixner
2016-05-05 11:24 ` [tip:smp/hotplug] sched/hotplug: " tip-bot for Peter Zijlstra
2016-05-06 13:06 ` tip-bot for Peter Zijlstra
2016-03-10 12:04 ` [patch 10/15] sched/migration: Move calc_load_migrate() into CPU_DYING Thomas Gleixner
2016-05-05 11:24 ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
2016-05-06 13:06 ` tip-bot for Thomas Gleixner
2016-07-12 4:37 ` [patch 10/15] " Anton Blanchard
2016-07-12 16:33 ` Thomas Gleixner
2016-07-12 18:49 ` Vaidyanathan Srinivasan
2016-07-12 20:05 ` Shilpasri G Bhat
2016-07-13 7:49 ` Peter Zijlstra
2016-07-13 13:40 ` [tip:sched/urgent] sched/core: Correct off by one bug in load migration calculation tip-bot for Thomas Gleixner
2016-03-10 12:04 ` [patch 09/15] sched/migration: Move prepare transition to SCHED_STARTING state Thomas Gleixner
2016-05-05 11:24 ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
2016-05-06 13:06 ` tip-bot for Thomas Gleixner
2016-03-10 12:04 ` [patch 11/15] sched/migration: Move CPU_ONLINE into scheduler state Thomas Gleixner
2016-05-05 11:25 ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
2016-05-06 13:07 ` tip-bot for Thomas Gleixner
2016-03-10 12:04 ` [patch 12/15] sched/hotplug: Move migration CPU_DYING to sched_cpu_dying() Thomas Gleixner
2016-05-05 11:25 ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
2016-05-06 13:07 ` tip-bot for Thomas Gleixner
2016-03-10 12:04 ` [patch 14/15] sched/fair: Make ilb_notifier an explicit call Thomas Gleixner
2016-05-05 11:26 ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
2016-05-06 13:08 ` tip-bot for Thomas Gleixner
2016-03-10 12:04 ` [patch 13/15] sched/hotplug: Make activate() the last hotplug step Thomas Gleixner
2016-05-05 11:25 ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
2016-05-06 13:07 ` tip-bot for Thomas Gleixner
2016-03-10 12:04 ` [patch 15/15] sched: Make hrtick_notifier an explicit call Thomas Gleixner
2016-05-05 11:26 ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
2016-05-06 13:08 ` tip-bot for Thomas Gleixner
2016-04-04 7:54 ` [patch 00/15] cpu/hotplug: Convert scheduler to hotplug state machine Peter Zijlstra
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20160310120025.091204363@linutronix.de \
--to=tglx@linutronix.de \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@kernel.org \
--cc=peterz@infradead.org \
--cc=rt@linutronix.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).