From: Peter Zijlstra <peterz@infradead.org>
To: tglx@linutronix.de, mingo@kernel.org
Cc: linux-kernel@vger.kernel.org, bigeasy@linutronix.de,
	qais.yousef@arm.com, swood@redhat.com, peterz@infradead.org,
	valentin.schneider@arm.com, juri.lelli@redhat.com,
	vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
	rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de,
	bristot@redhat.com, vincent.donnefort@arm.com
Subject: [PATCH 6/9] sched: Massage set_cpus_allowed
Date: Mon, 21 Sep 2020 18:36:03 +0200
Message-ID: <20200921163845.706308927@infradead.org>
In-Reply-To: <20200921163557.234036895@infradead.org>

Thread a u32 flags word through the *set_cpus_allowed*() callchain.
This will allow future users to add behavioural tweaks.
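
For example, a future user only needs a new SCA_* bit and a test at the
point in the callchain that cares. A minimal sketch; SCA_CHECK and
__set_cpus_allowed_ptr() are from this patch, while SCA_MIGRATE_ENABLE
and example_change_affinity() are purely illustrative and not part of it:

  #define SCA_CHECK		0x01	/* added by this patch */
  #define SCA_MIGRATE_ENABLE	0x02	/* hypothetical future flag */

  /* Hypothetical caller wanting both the affinity check and a new tweak;
   * flags combine with '|', so tweaks compose without new arguments. */
  static int example_change_affinity(struct task_struct *p,
				     const struct cpumask *mask)
  {
	return __set_cpus_allowed_ptr(p, mask, SCA_CHECK | SCA_MIGRATE_ENABLE);
  }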

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/sched/core.c     |   28 ++++++++++++++++++----------
 kernel/sched/deadline.c |    5 +++--
 kernel/sched/sched.h    |    7 +++++--
 3 files changed, 26 insertions(+), 14 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1828,13 +1828,14 @@ static int migration_cpu_stop(void *data
  * sched_class::set_cpus_allowed must do the below, but is not required to
  * actually call this function.
  */
-void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
 {
 	cpumask_copy(&p->cpus_mask, new_mask);
 	p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+static void
+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
 {
 	struct rq *rq = task_rq(p);
 	bool queued, running;
@@ -1855,7 +1856,7 @@ void do_set_cpus_allowed(struct task_str
 	if (running)
 		put_prev_task(rq, p);
 
-	p->sched_class->set_cpus_allowed(p, new_mask);
+	p->sched_class->set_cpus_allowed(p, new_mask, flags);
 
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
@@ -1863,6 +1864,11 @@ void do_set_cpus_allowed(struct task_str
 		set_next_task(rq, p);
 }
 
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	__do_set_cpus_allowed(p, new_mask, 0);
+}
+
 /*
  * Change a given task's CPU affinity. Migrate the thread to a
  * proper CPU and schedule it away if the CPU it's executing on
@@ -1873,7 +1879,8 @@ void do_set_cpus_allowed(struct task_str
  * call is not atomic; no spinlocks may be held.
  */
 static int __set_cpus_allowed_ptr(struct task_struct *p,
-				  const struct cpumask *new_mask, bool check)
+				  const struct cpumask *new_mask,
+				  u32 flags)
 {
 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
 	unsigned int dest_cpu;
@@ -1895,7 +1902,7 @@ static int __set_cpus_allowed_ptr(struct
 	 * Must re-check here, to close a race against __kthread_bind(),
 	 * sched_setaffinity() is not guaranteed to observe the flag.
 	 */
-	if (check && (p->flags & PF_NO_SETAFFINITY)) {
+	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1914,7 +1921,7 @@ static int __set_cpus_allowed_ptr(struct
 		goto out;
 	}
 
-	do_set_cpus_allowed(p, new_mask);
+	__do_set_cpus_allowed(p, new_mask, flags);
 
 	if (p->flags & PF_KTHREAD) {
 		/*
@@ -1951,7 +1958,7 @@ static int __set_cpus_allowed_ptr(struct
 
 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 {
-	return __set_cpus_allowed_ptr(p, new_mask, false);
+	return __set_cpus_allowed_ptr(p, new_mask, 0);
 }
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
@@ -2410,7 +2417,8 @@ void sched_set_stop_task(int cpu, struct
 #else
 
 static inline int __set_cpus_allowed_ptr(struct task_struct *p,
-					 const struct cpumask *new_mask, bool check)
+					 const struct cpumask *new_mask,
+					 u32 flags)
 {
 	return set_cpus_allowed_ptr(p, new_mask);
 }
@@ -6025,7 +6033,7 @@ long sched_setaffinity(pid_t pid, const
 	}
 #endif
 again:
-	retval = __set_cpus_allowed_ptr(p, new_mask, true);
+	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
 
 	if (!retval) {
 		cpuset_cpus_allowed(p, cpus_allowed);
@@ -6608,7 +6616,7 @@ void init_idle(struct task_struct *idle,
 	 *
 	 * And since this is boot we can forgo the serialization.
 	 */
-	set_cpus_allowed_common(idle, cpumask_of(cpu));
+	set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
 #endif
 	/*
 	 * We're having a chicken and egg problem, even though we are
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2280,7 +2280,8 @@ static void task_woken_dl(struct rq *rq,
 }
 
 static void set_cpus_allowed_dl(struct task_struct *p,
-				const struct cpumask *new_mask)
+				const struct cpumask *new_mask,
+				u32 flags)
 {
 	struct root_domain *src_rd;
 	struct rq *rq;
@@ -2309,7 +2310,7 @@ static void set_cpus_allowed_dl(struct t
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
-	set_cpus_allowed_common(p, new_mask);
+	set_cpus_allowed_common(p, new_mask, flags);
 }
 
 /* Assumes rq->lock is held */
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1806,7 +1806,8 @@ struct sched_class {
 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
-				 const struct cpumask *newmask);
+				 const struct cpumask *newmask,
+				 u32 flags);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
@@ -1899,7 +1900,9 @@ extern void update_group_capacity(struct
 
 extern void trigger_load_balance(struct rq *rq);
 
-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
+#define SCA_CHECK		0x01
+
+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
 
 #endif
 

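Classes without special affinity handling already point the hook at the
common helper, so they pick up the new flags argument for free once the
helper's signature changes in lock-step. Roughly, for the fair class (a
simplified sketch with unrelated fields elided, not the full
sched_class definition):

  const struct sched_class fair_sched_class = {
	/* ... */
  #ifdef CONFIG_SMP
	.set_cpus_allowed	= set_cpus_allowed_common,
  #endif
	/* ... */
  };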