* [PATCH] sched: Fix PI handling vs sched_setscheduler()
@ 2016-01-19 11:18 Peter Zijlstra
  2016-01-20 10:08 ` Juri Lelli
  0 siblings, 1 reply; 5+ messages in thread
From: Peter Zijlstra @ 2016-01-19 11:18 UTC (permalink / raw)
  To: mingo
  Cc: linux-kernel, Thomas Gleixner, Steven Rostedt, Juri Lelli, Andrea Parri

Andrea Parri reported:

> I found that the following scenario (with CONFIG_RT_GROUP_SCHED=y) is not
> handled correctly:
>
>     T1 (prio = 20)
>        lock(rtmutex);
>
>     T2 (prio = 20)
>        blocks on rtmutex  (rt_nr_boosted = 0 on T1's rq)
>
>     T1 (prio = 20)
>        sys_set_scheduler(prio = 0)
>           [new_effective_prio == oldprio]
>           T1 prio = 20    (rt_nr_boosted = 0 on T1's rq)
>
> The last step is incorrect as T1 is now boosted (c.f., rt_se_boosted());
> in particular, if we continue with
>
>    T1 (prio = 20)
>       unlock(rtmutex)
>          wakeup(T2)
>          adjust_prio(T1)
>             [prio != rt_mutex_getprio(T1)]
>             dequeue(T1)
>                rt_nr_boosted = (unsigned long)(-1)
>                ...
>             T1 prio = 0
>
> then we end up leaving rt_nr_boosted in an "inconsistent" state.
>
> The simple program attached could reproduce the previous scenario; note
> that, as a consequence of the presence of this state, the "assertion"
>
>     WARN_ON(!rt_nr_running && rt_nr_boosted)
>
> from dec_rt_group() may trigger.

Hmm, so normally we dequeue/enqueue tasks in sched_setscheduler(), which
would ensure the accounting stays correct. However, in the early PI path
we fail to do so.

So this was introduced by commit c365c292d0590, which fixed another
problem exactly because of that dequeue/enqueue, joy.

Fix this by teaching the rt class about DEQUEUE_SAVE/ENQUEUE_RESTORE and
having it preserve its runqueue location when those flags are set. This
requires decoupling the on_rt_rq() state from being on the list.

In order to allow for explicit movement during the SAVE/RESTORE,
introduce {DE,EN}QUEUE_MOVE. We still must use SAVE/RESTORE in these
cases to preserve other invariants.
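
For illustration, the intended pairings look roughly like this (a sketch
for readability only, not part of the patch):

	/*
	 * Spurious dequeue/enqueue around a property change; SAVE
	 * without MOVE, so move_entity() returns false and the entity
	 * keeps its place on the priority list:
	 */
	dequeue_task(rq, p, DEQUEUE_SAVE);
	/* ... modify p ... */
	enqueue_task(rq, p, ENQUEUE_RESTORE);

	/*
	 * The effective priority really changes; add MOVE so the
	 * entity is delisted and re-queued (at head or tail):
	 */
	dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_MOVE);
	/* ... modify p ... */
	enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_MOVE | ENQUEUE_HEAD);

Note that DEQUEUE_SAVE/DEQUEUE_MOVE are defined to match
ENQUEUE_RESTORE/ENQUEUE_MOVE bit-for-bit, which is what lets
move_entity() test a single flags word on both paths.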

Respecting the SAVE/RESTORE flags also has the (nice) side effect that
things like sys_nice()/sys_sched_setaffinity() no longer reorder FIFO
tasks (whereas they did before this patch).

Cc: tglx@linutronix.de
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Juri Lelli <juri.lelli@arm.com>
Reported-by: Andrea Parri <parri.andrea@gmail.com>
Tested-by: Andrea Parri <parri.andrea@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/linux/sched.h |    2 +
 kernel/sched/core.c   |   35 ++++++++++----------
 kernel/sched/rt.c     |   86 ++++++++++++++++++++++++++++++++++----------------
 kernel/sched/sched.h  |   31 +++++++++++++-----
 4 files changed, 104 insertions(+), 50 deletions(-)

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1287,6 +1287,8 @@ struct sched_rt_entity {
 	unsigned long timeout;
 	unsigned long watchdog_stamp;
 	unsigned int time_slice;
+	unsigned short on_rq;
+	unsigned short on_list;
 
 	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3466,7 +3466,7 @@ EXPORT_SYMBOL(default_wake_function);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-	int oldprio, queued, running, enqueue_flag = ENQUEUE_RESTORE;
+	int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
 	struct rq *rq;
 	const struct sched_class *prev_class;
 
@@ -3494,11 +3494,15 @@ void rt_mutex_setprio(struct task_struct
 
 	trace_sched_pi_setprio(p, prio);
 	oldprio = p->prio;
+
+	if (oldprio == prio)
+		queue_flag &= ~DEQUEUE_MOVE;
+
 	prev_class = p->sched_class;
 	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
 	if (queued)
-		dequeue_task(rq, p, DEQUEUE_SAVE);
+		dequeue_task(rq, p, queue_flag);
 	if (running)
 		put_prev_task(rq, p);
 
@@ -3516,7 +3520,7 @@ void rt_mutex_setprio(struct task_struct
 		if (!dl_prio(p->normal_prio) ||
 		    (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
 			p->dl.dl_boosted = 1;
-			enqueue_flag |= ENQUEUE_REPLENISH;
+			queue_flag |= ENQUEUE_REPLENISH;
 		} else
 			p->dl.dl_boosted = 0;
 		p->sched_class = &dl_sched_class;
@@ -3524,7 +3528,7 @@ void rt_mutex_setprio(struct task_struct
 		if (dl_prio(oldprio))
 			p->dl.dl_boosted = 0;
 		if (oldprio < prio)
-			enqueue_flag |= ENQUEUE_HEAD;
+			queue_flag |= ENQUEUE_HEAD;
 		p->sched_class = &rt_sched_class;
 	} else {
 		if (dl_prio(oldprio))
@@ -3539,7 +3543,7 @@ void rt_mutex_setprio(struct task_struct
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (queued)
-		enqueue_task(rq, p, enqueue_flag);
+		enqueue_task(rq, p, queue_flag);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
@@ -3895,6 +3899,7 @@ static int __sched_setscheduler(struct t
 	const struct sched_class *prev_class;
 	struct rq *rq;
 	int reset_on_fork;
+	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
 
 	/* may grab non-irq protected spin_locks */
 	BUG_ON(in_interrupt());
@@ -4077,17 +4082,14 @@ static int __sched_setscheduler(struct t
 		 * itself.
 		 */
 		new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
-		if (new_effective_prio == oldprio) {
-			__setscheduler_params(p, attr);
-			task_rq_unlock(rq, p, &flags);
-			return 0;
-		}
+		if (new_effective_prio == oldprio)
+			queue_flags &= ~DEQUEUE_MOVE;
 	}
 
 	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
 	if (queued)
-		dequeue_task(rq, p, DEQUEUE_SAVE);
+		dequeue_task(rq, p, queue_flags);
 	if (running)
 		put_prev_task(rq, p);
 
@@ -4097,15 +4099,14 @@ static int __sched_setscheduler(struct t
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (queued) {
-		int enqueue_flags = ENQUEUE_RESTORE;
 		/*
 		 * We enqueue to tail when the priority of a task is
 		 * increased (user space view).
 		 */
-		if (oldprio <= p->prio)
-			enqueue_flags |= ENQUEUE_HEAD;
+		if (oldprio < p->prio)
+			queue_flags |= ENQUEUE_HEAD;
 
-		enqueue_task(rq, p, enqueue_flags);
+		enqueue_task(rq, p, queue_flags);
 	}
 
 	check_class_changed(rq, p, prev_class, oldprio);
@@ -7890,7 +7891,7 @@ void sched_move_task(struct task_struct
 	queued = task_on_rq_queued(tsk);
 
 	if (queued)
-		dequeue_task(rq, tsk, DEQUEUE_SAVE);
+		dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);
 	if (unlikely(running))
 		put_prev_task(rq, tsk);
 
@@ -7914,7 +7915,7 @@ void sched_move_task(struct task_struct
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
 	if (queued)
-		enqueue_task(rq, tsk, ENQUEUE_RESTORE);
+		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
 
 	task_rq_unlock(rq, tsk, &flags);
 }
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -436,7 +436,7 @@ static void dequeue_top_rt_rq(struct rt_
 
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
-	return !list_empty(&rt_se->run_list);
+	return rt_se->on_rq;
 }
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -482,8 +482,8 @@ static inline struct rt_rq *group_rt_rq(
 	return rt_se->my_q;
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
@@ -499,7 +499,7 @@ static void sched_rt_rq_enqueue(struct r
 		if (!rt_se)
 			enqueue_top_rt_rq(rt_rq);
 		else if (!on_rt_rq(rt_se))
-			enqueue_rt_entity(rt_se, false);
+			enqueue_rt_entity(rt_se, 0);
 
 		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_curr(rq);
@@ -516,7 +516,7 @@ static void sched_rt_rq_dequeue(struct r
 	if (!rt_se)
 		dequeue_top_rt_rq(rt_rq);
 	else if (on_rt_rq(rt_se))
-		dequeue_rt_entity(rt_se);
+		dequeue_rt_entity(rt_se, 0);
 }
 
 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
@@ -1166,7 +1166,30 @@ void dec_rt_tasks(struct sched_rt_entity
 	dec_rt_group(rt_se, rt_rq);
 }
 
-static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
+/*
+ * Change rt_se->run_list location unless SAVE && !MOVE
+ *
+ * assumes ENQUEUE/DEQUEUE flags match
+ */
+static inline bool move_entity(unsigned int flags)
+{
+	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
+		return false;
+
+	return true;
+}
+
+static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
+{
+	list_del_init(&rt_se->run_list);
+
+	if (list_empty(array->queue + rt_se_prio(rt_se)))
+		__clear_bit(rt_se_prio(rt_se), array->bitmap);
+
+	rt_se->on_list = 0;
+}
+
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -1179,26 +1202,37 @@ static void __enqueue_rt_entity(struct s
 	 * get throttled and the current group doesn't have any other
 	 * active members.
 	 */
-	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
+	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
+		if (rt_se->on_list)
+			__delist_rt_entity(rt_se, array);
 		return;
+	}
 
-	if (head)
-		list_add(&rt_se->run_list, queue);
-	else
-		list_add_tail(&rt_se->run_list, queue);
-	__set_bit(rt_se_prio(rt_se), array->bitmap);
+	if (move_entity(flags)) {
+		WARN_ON_ONCE(rt_se->on_list);
+		if (flags & ENQUEUE_HEAD)
+			list_add(&rt_se->run_list, queue);
+		else
+			list_add_tail(&rt_se->run_list, queue);
+
+		__set_bit(rt_se_prio(rt_se), array->bitmap);
+		rt_se->on_list = 1;
+	}
+	rt_se->on_rq = 1;
 
 	inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 
-	list_del_init(&rt_se->run_list);
-	if (list_empty(array->queue + rt_se_prio(rt_se)))
-		__clear_bit(rt_se_prio(rt_se), array->bitmap);
+	if (move_entity(flags)) {
+		WARN_ON_ONCE(!rt_se->on_list);
+		__delist_rt_entity(rt_se, array);
+	}
+	rt_se->on_rq = 0;
 
 	dec_rt_tasks(rt_se, rt_rq);
 }
@@ -1207,7 +1241,7 @@ static void __dequeue_rt_entity(struct s
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
  */
-static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct sched_rt_entity *back = NULL;
 
@@ -1220,31 +1254,31 @@ static void dequeue_rt_stack(struct sche
 
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
-			__dequeue_rt_entity(rt_se);
+			__dequeue_rt_entity(rt_se, flags);
 	}
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct rq *rq = rq_of_rt_se(rt_se);
 
-	dequeue_rt_stack(rt_se);
+	dequeue_rt_stack(rt_se, flags);
 	for_each_sched_rt_entity(rt_se)
-		__enqueue_rt_entity(rt_se, head);
+		__enqueue_rt_entity(rt_se, flags);
 	enqueue_top_rt_rq(&rq->rt);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct rq *rq = rq_of_rt_se(rt_se);
 
-	dequeue_rt_stack(rt_se);
+	dequeue_rt_stack(rt_se, flags);
 
 	for_each_sched_rt_entity(rt_se) {
 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
 		if (rt_rq && rt_rq->rt_nr_running)
-			__enqueue_rt_entity(rt_se, false);
+			__enqueue_rt_entity(rt_se, flags);
 	}
 	enqueue_top_rt_rq(&rq->rt);
 }
@@ -1260,7 +1294,7 @@ enqueue_task_rt(struct rq *rq, struct ta
 	if (flags & ENQUEUE_WAKEUP)
 		rt_se->timeout = 0;
 
-	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
+	enqueue_rt_entity(rt_se, flags);
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
@@ -1271,7 +1305,7 @@ static void dequeue_task_rt(struct rq *r
 	struct sched_rt_entity *rt_se = &p->rt;
 
 	update_curr_rt(rq);
-	dequeue_rt_entity(rt_se);
+	dequeue_rt_entity(rt_se, flags);
 
 	dequeue_pushable_task(rq, p);
 }
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1130,18 +1130,35 @@ static inline void finish_lock_switch(st
 extern const int sched_prio_to_weight[40];
 extern const u32 sched_prio_to_wmult[40];
 
+/*
+ * {de,en}queue flags:
+ *
+ * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
+ *                are in a known state which allows modification. Such pairs
+ *                should preserve as much state as possible.
+ *
+ * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
+ *        in the runqueue.
+ *
+ * ENQUEUE_HEAD - place at front of runqueue (tail if not specificed)
+ *
+ */
+
+#define DEQUEUE_SLEEP		0x01
+#define DEQUEUE_SAVE		0x02 /* matches ENQUEUE_RESTORE */
+#define DEQUEUE_MOVE		0x04 /* matches ENQUEUE_MOVE */
+
 #define ENQUEUE_WAKEUP		0x01
-#define ENQUEUE_HEAD		0x02
+#define ENQUEUE_RESTORE		0x02
+#define ENQUEUE_MOVE		0x04
+
+#define ENQUEUE_HEAD		0x08
+#define ENQUEUE_REPLENISH	0x10
 #ifdef CONFIG_SMP
-#define ENQUEUE_WAKING		0x04	/* sched_class::task_waking was called */
+#define ENQUEUE_WAKING		0x20	/* sched_class::task_waking was called */
 #else
 #define ENQUEUE_WAKING		0x00
 #endif
-#define ENQUEUE_REPLENISH	0x08
-#define ENQUEUE_RESTORE	0x10
-
-#define DEQUEUE_SLEEP		0x01
-#define DEQUEUE_SAVE		0x02
 
 #define RETRY_TASK		((void *)-1UL)
 


* Re: [PATCH] sched: Fix PI handling vs sched_setscheduler()
  2016-01-19 11:18 [PATCH] sched: Fix PI handling vs sched_setscheduler() Peter Zijlstra
@ 2016-01-20 10:08 ` Juri Lelli
  2016-01-20 10:24   ` Peter Zijlstra
  0 siblings, 1 reply; 5+ messages in thread
From: Juri Lelli @ 2016-01-20 10:08 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: mingo, linux-kernel, Thomas Gleixner, Steven Rostedt, Juri Lelli,
	Andrea Parri

Hi,

On 19/01/16 12:18, Peter Zijlstra wrote:

[...]

> @@ -4097,15 +4099,14 @@ static int __sched_setscheduler(struct t
>  	if (running)
>  		p->sched_class->set_curr_task(rq);
>  	if (queued) {
> -		int enqueue_flags = ENQUEUE_RESTORE;
>  		/*
>  		 * We enqueue to tail when the priority of a task is
>  		 * increased (user space view).
>  		 */
> -		if (oldprio <= p->prio)
> -			enqueue_flags |= ENQUEUE_HEAD;
> +		if (oldprio < p->prio)
> +			queue_flags |= ENQUEUE_HEAD;

Was this condition broken before, or does it need to be changed now with
this patch?

[...]

> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1130,18 +1130,35 @@ static inline void finish_lock_switch(st
>  extern const int sched_prio_to_weight[40];
>  extern const u32 sched_prio_to_wmult[40];
>  
> +/*
> + * {de,en}queue flags:
> + *
> + * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
> + *                are in a known state which allows modification. Such pairs
> + *                should preserve as much state as possible.
> + *
> + * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
> + *        in the runqueue.
> + *
> + * ENQUEUE_HEAD - place at front of runqueue (tail if not specificed)
> + *
> + */

Do we want to document all the flags while we are at it? :)

Best,

- Juri

> +
> +#define DEQUEUE_SLEEP		0x01
> +#define DEQUEUE_SAVE		0x02 /* matches ENQUEUE_RESTORE */
> +#define DEQUEUE_MOVE		0x04 /* matches ENQUEUE_MOVE */
> +
>  #define ENQUEUE_WAKEUP		0x01
> -#define ENQUEUE_HEAD		0x02
> +#define ENQUEUE_RESTORE		0x02
> +#define ENQUEUE_MOVE		0x04
> +
> +#define ENQUEUE_HEAD		0x08
> +#define ENQUEUE_REPLENISH	0x10
>  #ifdef CONFIG_SMP
> -#define ENQUEUE_WAKING		0x04	/* sched_class::task_waking was called */
> +#define ENQUEUE_WAKING		0x20	/* sched_class::task_waking was called */
>  #else
>  #define ENQUEUE_WAKING		0x00
>  #endif
> -#define ENQUEUE_REPLENISH	0x08
> -#define ENQUEUE_RESTORE	0x10
> -
> -#define DEQUEUE_SLEEP		0x01
> -#define DEQUEUE_SAVE		0x02
>  
>  #define RETRY_TASK		((void *)-1UL)
>  
> 


* Re: [PATCH] sched: Fix PI handling vs sched_setscheduler()
  2016-01-20 10:08 ` Juri Lelli
@ 2016-01-20 10:24   ` Peter Zijlstra
  2016-01-20 10:34     ` Juri Lelli
  0 siblings, 1 reply; 5+ messages in thread
From: Peter Zijlstra @ 2016-01-20 10:24 UTC (permalink / raw)
  To: Juri Lelli
  Cc: mingo, linux-kernel, Thomas Gleixner, Steven Rostedt, Juri Lelli,
	Andrea Parri

On Wed, Jan 20, 2016 at 10:08:39AM +0000, Juri Lelli wrote:
> Hi,
> 
> On 19/01/16 12:18, Peter Zijlstra wrote:
> 
> [...]
> 
> > @@ -4097,15 +4099,14 @@ static int __sched_setscheduler(struct t
> >  	if (running)
> >  		p->sched_class->set_curr_task(rq);
> >  	if (queued) {
> > -		int enqueue_flags = ENQUEUE_RESTORE;
> >  		/*
> >  		 * We enqueue to tail when the priority of a task is
> >  		 * increased (user space view).
> >  		 */
> > -		if (oldprio <= p->prio)
> > -			enqueue_flags |= ENQUEUE_HEAD;
> > +		if (oldprio < p->prio)
> > +			queue_flags |= ENQUEUE_HEAD;
> 
> Was this condition broken before, or does it need to be changed now with
> this patch?

I could not see how we could get there before, seeing how the == case is
handled above.

> > --- a/kernel/sched/sched.h
> > +++ b/kernel/sched/sched.h
> > @@ -1130,18 +1130,35 @@ static inline void finish_lock_switch(st
> >  extern const int sched_prio_to_weight[40];
> >  extern const u32 sched_prio_to_wmult[40];
> >  
> > +/*
> > + * {de,en}queue flags:
> > + *
> > + * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
> > + *                are in a known state which allows modification. Such pairs
> > + *                should preserve as much state as possible.
> > + *
> > + * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
> > + *        in the runqueue.
> > + *
> > + * ENQUEUE_HEAD - place at front of runqueue (tail if not specificed)
> > + *
> > + */
> 
> Do we want to document all the flags while we are at it? :)

Probably.. does the below work? I think I bailed on replenish because I
could not come up with a coherent short description, does the below make
any sense?


--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1140,7 +1140,9 @@ extern const u32 sched_prio_to_wmult[40]
  * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
  *        in the runqueue.
  *
- * ENQUEUE_HEAD - place at front of runqueue (tail if not specificed)
+ * ENQUEUE_HEAD      - place at front of runqueue (tail if not specificed)
+ * ENQUEUE_REPLENISH - push the CBS slot forward
+ * ENQUEUE_WAKING    - sched_class::task_waking was called
  *
  */
 


* Re: [PATCH] sched: Fix PI handling vs sched_setscheduler()
  2016-01-20 10:24   ` Peter Zijlstra
@ 2016-01-20 10:34     ` Juri Lelli
  2016-01-20 11:19       ` Peter Zijlstra
  0 siblings, 1 reply; 5+ messages in thread
From: Juri Lelli @ 2016-01-20 10:34 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: mingo, linux-kernel, Thomas Gleixner, Steven Rostedt, Juri Lelli,
	Andrea Parri

On 20/01/16 11:24, Peter Zijlstra wrote:
> On Wed, Jan 20, 2016 at 10:08:39AM +0000, Juri Lelli wrote:
> > Hi,
> > 
> > On 19/01/16 12:18, Peter Zijlstra wrote:
> > 
> > [...]
> > 
> > > @@ -4097,15 +4099,14 @@ static int __sched_setscheduler(struct t
> > >  	if (running)
> > >  		p->sched_class->set_curr_task(rq);
> > >  	if (queued) {
> > > -		int enqueue_flags = ENQUEUE_RESTORE;
> > >  		/*
> > >  		 * We enqueue to tail when the priority of a task is
> > >  		 * increased (user space view).
> > >  		 */
> > > -		if (oldprio <= p->prio)
> > > -			enqueue_flags |= ENQUEUE_HEAD;
> > > +		if (oldprio < p->prio)
> > > +			queue_flags |= ENQUEUE_HEAD;
> > 
> > Was this condition broken before, or does it need to be changed now with
> > this patch?
> 
> I could not see how we could get there before, seeing how the == case is
> handled above.
> 
> > > --- a/kernel/sched/sched.h
> > > +++ b/kernel/sched/sched.h
> > > @@ -1130,18 +1130,35 @@ static inline void finish_lock_switch(st
> > >  extern const int sched_prio_to_weight[40];
> > >  extern const u32 sched_prio_to_wmult[40];
> > >  
> > > +/*
> > > + * {de,en}queue flags:
> > > + *
> > > + * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
> > > + *                are in a known state which allows modification. Such pairs
> > > + *                should preserve as much state as possible.
> > > + *
> > > + * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
> > > + *        in the runqueue.
> > > + *
> > > + * ENQUEUE_HEAD - place at front of runqueue (tail if not specificed)
> > > + *
> > > + */
> > 
> > Do we want to document all the flags while we are at it? :)
> 
> Probably.. does the below work? I think I bailed on replenish because I
> could not come up with a coherent short description, does the below make
> any sense?
> 
> 
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1140,7 +1140,9 @@ extern const u32 sched_prio_to_wmult[40]
>   * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
>   *        in the runqueue.
>   *
> - * ENQUEUE_HEAD - place at front of runqueue (tail if not specificed)
> + * ENQUEUE_HEAD      - place at front of runqueue (tail if not specificed)
> + * ENQUEUE_REPLENISH - push the CBS slot forward


 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)

Maybe? :)

> + * ENQUEUE_WAKING    - sched_class::task_waking was called
>   *
>   */
>  
> 


* Re: [PATCH] sched: Fix PI handling vs sched_setscheduler()
  2016-01-20 10:34     ` Juri Lelli
@ 2016-01-20 11:19       ` Peter Zijlstra
  0 siblings, 0 replies; 5+ messages in thread
From: Peter Zijlstra @ 2016-01-20 11:19 UTC (permalink / raw)
  To: Juri Lelli
  Cc: mingo, linux-kernel, Thomas Gleixner, Steven Rostedt, Juri Lelli,
	Andrea Parri

On Wed, Jan 20, 2016 at 10:34:21AM +0000, Juri Lelli wrote:
> On 20/01/16 11:24, Peter Zijlstra wrote:
> > + * ENQUEUE_REPLENISH - push the CBS slot forward
> 
> 
>  * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
> 
> Maybe? :)

Done, thanks!
