From: Peter Zijlstra <peterz@infradead.org>
To: mingo@kernel.org, juri.lelli@redhat.com
Cc: linux-kernel@vger.kernel.org, dietmar.eggemann@arm.com,
	luca.abeni@santannapisa.it, bristot@redhat.com,
	balsini@android.com, dvyukov@google.com, tglx@linutronix.de,
	vpillai@digitalocean.com, rostedt@goodmis.org,
	peterz@infradead.org
Subject: [RFC][PATCH 05/13] sched: Add task_struct pointer to sched_class::set_curr_task
Date: Fri, 26 Jul 2019 16:54:14 +0200
Message-ID: <20190726161357.638193058@infradead.org>
In-Reply-To: <20190726145409.947503076@infradead.org>

In preparation for further separating pick_next_task() and
set_curr_task(), we have to pass the actual task into set_curr_task();
while there, rename it to set_next_task() to better pair with
put_prev_task().
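
A minimal sketch (illustration only, not part of the diff below) of
the common caller pattern in core.c that this rename touches; the
function names are real, the surrounding detail is elided:

	struct rq_flags rf;
	struct rq *rq = task_rq_lock(p, &rf);
	int queued = task_on_rq_queued(p);
	int running = task_current(rq, p);

	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);

	/* ... change p's scheduling attributes ... */

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);	/* was: set_curr_task(rq, p) */

	task_rq_unlock(rq, p, &rf);

Note that the new set_next_task() wrapper (sched.h hunk below) also
grows a WARN_ON_ONCE(rq->curr != next) sanity check, matching the one
added to put_prev_task().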

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/sched/core.c      |   12 ++++++------
 kernel/sched/deadline.c  |    7 +------
 kernel/sched/fair.c      |   17 ++++++++++++++---
 kernel/sched/idle.c      |   27 +++++++++++++++------------
 kernel/sched/rt.c        |    7 +------
 kernel/sched/sched.h     |    8 +++++---
 kernel/sched/stop_task.c |   17 +++++++----------
 7 files changed, 49 insertions(+), 46 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1494,7 +1494,7 @@ void do_set_cpus_allowed(struct task_str
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 }
 
 /*
@@ -4273,7 +4273,7 @@ void rt_mutex_setprio(struct task_struct
 	if (queued)
 		enqueue_task(rq, p, queue_flag);
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
@@ -4340,7 +4340,7 @@ void set_user_nice(struct task_struct *p
 			resched_curr(rq);
 	}
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 out_unlock:
 	task_rq_unlock(rq, p, &rf);
 }
@@ -4783,7 +4783,7 @@ static int __sched_setscheduler(struct t
 		enqueue_task(rq, p, queue_flags);
 	}
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 
@@ -5972,7 +5972,7 @@ void sched_setnuma(struct task_struct *p
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 	task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_NUMA_BALANCING */
@@ -6853,7 +6853,7 @@ void sched_move_task(struct task_struct
 	if (queued)
 		enqueue_task(rq, tsk, queue_flags);
 	if (running)
-		set_curr_task(rq, tsk);
+		set_next_task(rq, tsk);
 
 	task_rq_unlock(rq, tsk, &rf);
 }
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1812,11 +1812,6 @@ static void task_fork_dl(struct task_str
 	 */
 }
 
-static void set_curr_task_dl(struct rq *rq)
-{
-	set_next_task_dl(rq, rq->curr);
-}
-
 #ifdef CONFIG_SMP
 
 /* Only try algorithms three times */
@@ -2404,6 +2399,7 @@ const struct sched_class dl_sched_class
 
 	.pick_next_task		= pick_next_task_dl,
 	.put_prev_task		= put_prev_task_dl,
+	.set_next_task		= set_next_task_dl,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_dl,
@@ -2414,7 +2410,6 @@ const struct sched_class dl_sched_class
 	.task_woken		= task_woken_dl,
 #endif
 
-	.set_curr_task		= set_curr_task_dl,
 	.task_tick		= task_tick_dl,
 	.task_fork              = task_fork_dl,
 
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10121,9 +10121,19 @@ static void switched_to_fair(struct rq *
  * This routine is mostly called to set cfs_rq->curr field when a task
  * migrates between groups/classes.
  */
-static void set_curr_task_fair(struct rq *rq)
+static void set_next_task_fair(struct rq *rq, struct task_struct *p)
 {
-	struct sched_entity *se = &rq->curr->se;
+	struct sched_entity *se = &p->se;
+
+#ifdef CONFIG_SMP
+	if (task_on_rq_queued(p)) {
+		/*
+		 * Move the next running task to the front of the list, so our
+		 * cfs_tasks list becomes an MRU one.
+		 */
+		list_move(&se->group_node, &rq->cfs_tasks);
+	}
+#endif
 
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -10394,7 +10404,9 @@ const struct sched_class fair_sched_clas
 	.check_preempt_curr	= check_preempt_wakeup,
 
 	.pick_next_task		= pick_next_task_fair,
+
 	.put_prev_task		= put_prev_task_fair,
+	.set_next_task		= set_next_task_fair,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_fair,
@@ -10407,7 +10419,6 @@ const struct sched_class fair_sched_clas
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
 
-	.set_curr_task          = set_curr_task_fair,
 	.task_tick		= task_tick_fair,
 	.task_fork		= task_fork_fair,
 
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -374,14 +374,25 @@ static void check_preempt_curr_idle(stru
 	resched_curr(rq);
 }
 
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+{
+}
+
+static void set_next_task_idle(struct rq *rq, struct task_struct *next)
+{
+	update_idle_core(rq);
+	schedstat_inc(rq->sched_goidle);
+}
+
 static struct task_struct *
 pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
+	struct task_struct *next = rq->idle;
+
 	put_prev_task(rq, prev);
-	update_idle_core(rq);
-	schedstat_inc(rq->sched_goidle);
+	set_next_task_idle(rq, next);
 
-	return rq->idle;
+	return next;
 }
 
 /*
@@ -397,10 +408,6 @@ dequeue_task_idle(struct rq *rq, struct
 	raw_spin_lock_irq(&rq->lock);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
-{
-}
-
 /*
  * scheduler tick hitting a task of our scheduling class.
  *
@@ -413,10 +420,6 @@ static void task_tick_idle(struct rq *rq
 {
 }
 
-static void set_curr_task_idle(struct rq *rq)
-{
-}
-
 static void switched_to_idle(struct rq *rq, struct task_struct *p)
 {
 	BUG();
@@ -451,13 +454,13 @@ const struct sched_class idle_sched_clas
 
 	.pick_next_task		= pick_next_task_idle,
 	.put_prev_task		= put_prev_task_idle,
+	.set_next_task		= set_next_task_idle,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_idle,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
 
-	.set_curr_task          = set_curr_task_idle,
 	.task_tick		= task_tick_idle,
 
 	.get_rr_interval	= get_rr_interval_idle,
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2355,11 +2355,6 @@ static void task_tick_rt(struct rq *rq,
 	}
 }
 
-static void set_curr_task_rt(struct rq *rq)
-{
-	set_next_task_rt(rq, rq->curr);
-}
-
 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 {
 	/*
@@ -2381,6 +2376,7 @@ const struct sched_class rt_sched_class
 
 	.pick_next_task		= pick_next_task_rt,
 	.put_prev_task		= put_prev_task_rt,
+	.set_next_task		= set_next_task_rt,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_rt,
@@ -2392,7 +2388,6 @@ const struct sched_class rt_sched_class
 	.switched_from		= switched_from_rt,
 #endif
 
-	.set_curr_task          = set_curr_task_rt,
 	.task_tick		= task_tick_rt,
 
 	.get_rr_interval	= get_rr_interval_rt,
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1711,6 +1711,7 @@ struct sched_class {
 					       struct task_struct *prev,
 					       struct rq_flags *rf);
 	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+	void (*set_next_task)(struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
@@ -1725,7 +1726,6 @@ struct sched_class {
 	void (*rq_offline)(struct rq *rq);
 #endif
 
-	void (*set_curr_task)(struct rq *rq);
 	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
 	void (*task_fork)(struct task_struct *p);
 	void (*task_dead)(struct task_struct *p);
@@ -1755,12 +1755,14 @@ struct sched_class {
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
+	WARN_ON_ONCE(rq->curr != prev);
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
-static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
+static inline void set_next_task(struct rq *rq, struct task_struct *next)
 {
-	curr->sched_class->set_curr_task(rq);
+	WARN_ON_ONCE(rq->curr != next);
+	next->sched_class->set_next_task(rq, next);
 }
 
 #ifdef CONFIG_SMP
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -23,6 +23,11 @@ check_preempt_curr_stop(struct rq *rq, s
 	/* we're never preempted */
 }
 
+static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
+{
+	stop->se.exec_start = rq_clock_task(rq);
+}
+
 static struct task_struct *
 pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -32,8 +37,7 @@ pick_next_task_stop(struct rq *rq, struc
 		return NULL;
 
 	put_prev_task(rq, prev);
-
-	stop->se.exec_start = rq_clock_task(rq);
+	set_next_task_stop(rq, stop);
 
 	return stop;
 }
@@ -86,13 +90,6 @@ static void task_tick_stop(struct rq *rq
 {
 }
 
-static void set_curr_task_stop(struct rq *rq)
-{
-	struct task_struct *stop = rq->stop;
-
-	stop->se.exec_start = rq_clock_task(rq);
-}
-
 static void switched_to_stop(struct rq *rq, struct task_struct *p)
 {
 	BUG(); /* its impossible to change to this class */
@@ -128,13 +125,13 @@ const struct sched_class stop_sched_clas
 
 	.pick_next_task		= pick_next_task_stop,
 	.put_prev_task		= put_prev_task_stop,
+	.set_next_task		= set_next_task_stop,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_stop,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
 
-	.set_curr_task          = set_curr_task_stop,
 	.task_tick		= task_tick_stop,
 
 	.get_rr_interval	= get_rr_interval_stop,


