From: Peter Zijlstra <peterz@infradead.org>
To: tglx@linutronix.de, mingo@kernel.org
Cc: linux-kernel@vger.kernel.org, bigeasy@linutronix.de,
qais.yousef@arm.com, swood@redhat.com, peterz@infradead.org,
valentin.schneider@arm.com, juri.lelli@redhat.com,
vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de,
bristot@redhat.com, vincent.donnefort@arm.com, tj@kernel.org,
ouwen210@hotmail.com
Subject: [PATCH v4 08/19] sched: Massage set_cpus_allowed()
Date: Fri, 23 Oct 2020 12:12:06 +0200
Message-ID: <20201023102346.729082820@infradead.org>
In-Reply-To: <20201023101158.088940906@infradead.org>
Thread a u32 flags word through the *set_cpus_allowed*() callchain.
This will allow adding behavioural tweaks for future users.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/sched/core.c | 28 ++++++++++++++++++----------
kernel/sched/deadline.c | 5 +++--
kernel/sched/sched.h | 7 +++++--
3 files changed, 26 insertions(+), 14 deletions(-)
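
For readers skimming the diff: below is a minimal, self-contained sketch (illustration only, not part of the patch) of the calling convention this change introduces. A u32 flags word replaces the old bool so that SCA_CHECK, and any future flags, can be OR'd together. The names __set_cpus_allowed_ptr(), set_cpus_allowed_ptr(), SCA_CHECK and PF_NO_SETAFFINITY mirror the patch; the task structure, the mask-as-unsigned-long, the flag values and sched_setaffinity_like() are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

#define SCA_CHECK		0x01		/* mirrors the flag added in sched.h */
#define PF_NO_SETAFFINITY	0x04000000	/* stand-in for the kernel's task flag */

struct task {					/* simplified stand-in for task_struct */
	unsigned int flags;
	unsigned long cpus_mask;
};

/* The internal helper now takes a u32 flags word instead of a bool. */
static int __set_cpus_allowed_ptr(struct task *p, unsigned long new_mask, uint32_t flags)
{
	/* Only user-initiated changes (SCA_CHECK) honour PF_NO_SETAFFINITY. */
	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY))
		return -1;			/* -EINVAL in the kernel */

	p->cpus_mask = new_mask;
	return 0;
}

/* Kernel-internal callers keep the old behaviour by passing 0 ... */
static int set_cpus_allowed_ptr(struct task *p, unsigned long new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, 0);
}

/* ... while the user-facing path passes SCA_CHECK, as sched_setaffinity() does. */
static int sched_setaffinity_like(struct task *p, unsigned long new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
}

int main(void)
{
	struct task kthread = { .flags = PF_NO_SETAFFINITY, .cpus_mask = 0x1 };

	printf("kernel path: %d\n", set_cpus_allowed_ptr(&kthread, 0x3));	/* 0  */
	printf("user path:   %d\n", sched_setaffinity_like(&kthread, 0x3));	/* -1 */
	return 0;
}
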
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1828,13 +1828,14 @@ static int migration_cpu_stop(void *data
* sched_class::set_cpus_allowed must do the below, but is not required to
* actually call this function.
*/
-void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
{
cpumask_copy(&p->cpus_mask, new_mask);
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+static void
+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
{
struct rq *rq = task_rq(p);
bool queued, running;
@@ -1855,7 +1856,7 @@ void do_set_cpus_allowed(struct task_str
if (running)
put_prev_task(rq, p);
- p->sched_class->set_cpus_allowed(p, new_mask);
+ p->sched_class->set_cpus_allowed(p, new_mask, flags);
if (queued)
enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
@@ -1863,6 +1864,11 @@ void do_set_cpus_allowed(struct task_str
set_next_task(rq, p);
}
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+ __do_set_cpus_allowed(p, new_mask, 0);
+}
+
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
@@ -1873,7 +1879,8 @@ void do_set_cpus_allowed(struct task_str
* call is not atomic; no spinlocks may be held.
*/
static int __set_cpus_allowed_ptr(struct task_struct *p,
- const struct cpumask *new_mask, bool check)
+ const struct cpumask *new_mask,
+ u32 flags)
{
const struct cpumask *cpu_valid_mask = cpu_active_mask;
unsigned int dest_cpu;
@@ -1895,7 +1902,7 @@ static int __set_cpus_allowed_ptr(struct
* Must re-check here, to close a race against __kthread_bind(),
* sched_setaffinity() is not guaranteed to observe the flag.
*/
- if (check && (p->flags & PF_NO_SETAFFINITY)) {
+ if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
ret = -EINVAL;
goto out;
}
@@ -1914,7 +1921,7 @@ static int __set_cpus_allowed_ptr(struct
goto out;
}
- do_set_cpus_allowed(p, new_mask);
+ __do_set_cpus_allowed(p, new_mask, flags);
if (p->flags & PF_KTHREAD) {
/*
@@ -1951,7 +1958,7 @@ static int __set_cpus_allowed_ptr(struct
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
- return __set_cpus_allowed_ptr(p, new_mask, false);
+ return __set_cpus_allowed_ptr(p, new_mask, 0);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
@@ -2410,7 +2417,8 @@ void sched_set_stop_task(int cpu, struct
#else
static inline int __set_cpus_allowed_ptr(struct task_struct *p,
- const struct cpumask *new_mask, bool check)
+ const struct cpumask *new_mask,
+ u32 flags)
{
return set_cpus_allowed_ptr(p, new_mask);
}
@@ -6025,7 +6033,7 @@ long sched_setaffinity(pid_t pid, const
}
#endif
again:
- retval = __set_cpus_allowed_ptr(p, new_mask, true);
+ retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
if (!retval) {
cpuset_cpus_allowed(p, cpus_allowed);
@@ -6608,7 +6616,7 @@ void init_idle(struct task_struct *idle,
*
* And since this is boot we can forgo the serialization.
*/
- set_cpus_allowed_common(idle, cpumask_of(cpu));
+ set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
#endif
/*
* We're having a chicken and egg problem, even though we are
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2280,7 +2280,8 @@ static void task_woken_dl(struct rq *rq,
}
static void set_cpus_allowed_dl(struct task_struct *p,
- const struct cpumask *new_mask)
+ const struct cpumask *new_mask,
+ u32 flags)
{
struct root_domain *src_rd;
struct rq *rq;
@@ -2309,7 +2310,7 @@ static void set_cpus_allowed_dl(struct t
raw_spin_unlock(&src_dl_b->lock);
}
- set_cpus_allowed_common(p, new_mask);
+ set_cpus_allowed_common(p, new_mask, flags);
}
/* Assumes rq->lock is held */
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1806,7 +1806,8 @@ struct sched_class {
void (*task_woken)(struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
- const struct cpumask *newmask);
+ const struct cpumask *newmask,
+ u32 flags);
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
@@ -1899,7 +1900,9 @@ extern void update_group_capacity(struct
extern void trigger_load_balance(struct rq *rq);
-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
+#define SCA_CHECK 0x01
+
+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
#endif
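
And a correspondingly small sketch of how a scheduling-class callback adapts to the widened hook, modelled loosely on the deadline.c hunk above. Here set_cpus_allowed_common() and the flags parameter come from the patch; struct sched_class_ops, set_cpus_allowed_example() and example_class are illustrative stand-ins rather than the kernel's types.

#include <stdio.h>
#include <stdint.h>

struct task;					/* opaque for this sketch */

/* The per-class hook now carries the flags word through to the common helper. */
struct sched_class_ops {
	void (*set_cpus_allowed)(struct task *p, unsigned long new_mask, uint32_t flags);
};

static void set_cpus_allowed_common(struct task *p, unsigned long new_mask, uint32_t flags)
{
	(void)p;
	(void)flags;				/* unused today, reserved for behavioural tweaks */
	printf("common: mask=%#lx\n", new_mask);
}

/* A class that needs extra work (cf. set_cpus_allowed_dl) still ends by forwarding. */
static void set_cpus_allowed_example(struct task *p, unsigned long new_mask, uint32_t flags)
{
	/* ... class-specific bookkeeping would go here ... */
	set_cpus_allowed_common(p, new_mask, flags);
}

static const struct sched_class_ops example_class = {
	.set_cpus_allowed = set_cpus_allowed_example,
};

int main(void)
{
	example_class.set_cpus_allowed(NULL, 0x3, 0);
	return 0;
}
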