All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v5 1/3] lib/plist: Provide plist_add_head() for nodes with the same prio
@ 2015-04-09  3:27 Xunlei Pang
  2015-04-09  3:27 ` [PATCH v5 2/3] sched/rt: Fix wrong SMP scheduler behavior for equal prio cases Xunlei Pang
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Xunlei Pang @ 2015-04-09  3:27 UTC (permalink / raw)
  To: linux-kernel
  Cc: Peter Zijlstra, Steven Rostedt, Juri Lelli, Dan Streetman, Xunlei Pang

From: Xunlei Pang <pang.xunlei@linaro.org>

If there're multiple nodes with the same prio as @node, currently
plist_add() will add @node behind all of them. Now we need to add
@node before all of these nodes for SMP RT scheduler.

This patch adds a common __plist_add() for adding @node before or
after existing nodes with the same prio, then adds plist_add_head()
and plist_add_tail() inline wrapper functions for convenient uses.

Finally, define plist_add() as plist_add_tail() which has the same
behaviour as before.

Reviewed-by: Dan Streetman <ddstreet@ieee.org>
Signed-off-by: Xunlei Pang <pang.xunlei@linaro.org>
---
 include/linux/plist.h | 30 +++++++++++++++++++++++++++++-
 lib/plist.c           | 22 +++++++++++++++++++---
 2 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/include/linux/plist.h b/include/linux/plist.h
index 9788360..10c834c 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -138,7 +138,35 @@ static inline void plist_node_init(struct plist_node *node, int prio)
 	INIT_LIST_HEAD(&node->node_list);
 }
 
-extern void plist_add(struct plist_node *node, struct plist_head *head);
+extern void __plist_add(struct plist_node *node,
+				struct plist_head *head, bool is_head);
+
+/**
+ * plist_add_head - add @node to @head, before all existing same-prio nodes
+ *
+ * @node:	&struct plist_node pointer
+ * @head:	&struct plist_head pointer
+ */
+static inline
+void plist_add_head(struct plist_node *node, struct plist_head *head)
+{
+	__plist_add(node, head, true);
+}
+
+/**
+ * plist_add_tail - add @node to @head, after all existing same-prio nodes
+ *
+ * @node:	&struct plist_node pointer
+ * @head:	&struct plist_head pointer
+ */
+static inline
+void plist_add_tail(struct plist_node *node, struct plist_head *head)
+{
+	__plist_add(node, head, false);
+}
+
+#define plist_add plist_add_tail
+
 extern void plist_del(struct plist_node *node, struct plist_head *head);
 
 extern void plist_requeue(struct plist_node *node, struct plist_head *head);
diff --git a/lib/plist.c b/lib/plist.c
index 3a30c53..9f396f1 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -66,12 +66,16 @@ static void plist_check_head(struct plist_head *head)
 #endif
 
 /**
- * plist_add - add @node to @head
+ * __plist_add - add @node to @head
  *
  * @node:	&struct plist_node pointer
  * @head:	&struct plist_head pointer
+ * @is_head:	bool
+ *
+ * If there're any nodes with the same prio, add @node
+ * behind or before all of them according to @is_head.
  */
-void plist_add(struct plist_node *node, struct plist_head *head)
+void __plist_add(struct plist_node *node, struct plist_head *head, bool is_head)
 {
 	struct plist_node *first, *iter, *prev = NULL;
 	struct list_head *node_next = &head->node_list;
@@ -96,8 +100,20 @@ void plist_add(struct plist_node *node, struct plist_head *head)
 				struct plist_node, prio_list);
 	} while (iter != first);
 
-	if (!prev || prev->prio != node->prio)
+	if (!prev || prev->prio != node->prio) {
 		list_add_tail(&node->prio_list, &iter->prio_list);
+	} else if (is_head) {
+		/*
+		 * prev has the same priority as the node that is being
+		 * added. It is also the first node for this priority,
+		 * but the new node needs to be added ahead of it.
+		 * To accomplish this, replace prev in the prio_list
+		 * with node. Then set node_next to prev->node_list so
+		 * that the new node gets added before prev and not iter.
+		 */
+		list_replace_init(&prev->prio_list, &node->prio_list);
+		node_next = &prev->node_list;
+	}
 ins_node:
 	list_add_tail(&node->node_list, node_next);
 
-- 
1.9.1



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH v5 2/3] sched/rt: Fix wrong SMP scheduler behavior for equal prio cases
  2015-04-09  3:27 [PATCH v5 1/3] lib/plist: Provide plist_add_head() for nodes with the same prio Xunlei Pang
@ 2015-04-09  3:27 ` Xunlei Pang
  2015-04-09 13:56   ` Steven Rostedt
  2015-04-09  3:27 ` [PATCH v5 3/3] sched/rt: Check to push the task when changing its affinity Xunlei Pang
  2015-04-09 13:43 ` [PATCH v5 1/3] lib/plist: Provide plist_add_head() for nodes with the same prio Steven Rostedt
  2 siblings, 1 reply; 5+ messages in thread
From: Xunlei Pang @ 2015-04-09  3:27 UTC (permalink / raw)
  To: linux-kernel
  Cc: Peter Zijlstra, Steven Rostedt, Juri Lelli, Dan Streetman, Xunlei Pang

From: Xunlei Pang <pang.xunlei@linaro.org>

Currently, SMP RT scheduler has some trouble in dealing with
equal prio cases.

For example, in check_preempt_equal_prio():
When RT1(current task) gets preempted by RT2, if there is a
migratable RT3 with the same prio, RT3 will be pushed away instead
of RT1 afterwards, because RT1 will be enqueued to the tail of
the pushable list when going through succeeding put_prev_task_rt()
triggered by resched. This broke FIFO.

Furthermore, this is also problematic for normal preempted cases
if there are some rt tasks queued with the same prio as current.
Because current will be put behind these tasks in the pushable
queue.

So, if a task is running and gets preempted by a higher priority
task (or even with same priority for migrating), this patch ensures
that it is put ahead of any existing task with the same priority in
the pushable queue.

Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Xunlei Pang <pang.xunlei@linaro.org>
---
 kernel/sched/rt.c | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 575da76..402162a 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -359,11 +359,15 @@ static inline void set_post_schedule(struct rq *rq)
 	rq->post_schedule = has_pushable_tasks(rq);
 }
 
-static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+static void
+enqueue_pushable_task(struct rq *rq, struct task_struct *p, bool head)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 	plist_node_init(&p->pushable_tasks, p->prio);
-	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+	if (head)
+		plist_add_head(&p->pushable_tasks, &rq->rt.pushable_tasks);
+	else
+		plist_add_tail(&p->pushable_tasks, &rq->rt.pushable_tasks);
 
 	/* Update the highest prio pushable task */
 	if (p->prio < rq->rt.highest_prio.next)
@@ -385,7 +389,8 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 
 #else
 
-static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+static inline
+void enqueue_pushable_task(struct rq *rq, struct task_struct *p, bool head)
 {
 }
 
@@ -1260,7 +1265,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
-		enqueue_pushable_task(rq, p);
+		enqueue_pushable_task(rq, p, false);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1507,7 +1512,16 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 * if it is still active
 	 */
 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
-		enqueue_pushable_task(rq, p);
+		/*
+		 * put_prev_task_rt() is called by many functions,
+		 * pick_next_task_rt() is the only one may have
+		 * PREEMPT_ACTIVE set. So if detecting p(current
+		 * task) is preempted in such case, we should
+		 * enqueue it to the front of the pushable plist,
+		 * as there may be multiple tasks with the same
+		 * priority as p.
+		 */
+		enqueue_pushable_task(rq, p, !!(preempt_count() & PREEMPT_ACTIVE));
 }
 
 #ifdef CONFIG_SMP
@@ -2091,7 +2105,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 		rq->rt.rt_nr_migratory--;
 	} else {
 		if (!task_current(rq, p))
-			enqueue_pushable_task(rq, p);
+			enqueue_pushable_task(rq, p, false);
 		rq->rt.rt_nr_migratory++;
 	}
 
-- 
1.9.1



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH v5 3/3] sched/rt: Check to push the task when changing its affinity
  2015-04-09  3:27 [PATCH v5 1/3] lib/plist: Provide plist_add_head() for nodes with the same prio Xunlei Pang
  2015-04-09  3:27 ` [PATCH v5 2/3] sched/rt: Fix wrong SMP scheduler behavior for equal prio cases Xunlei Pang
@ 2015-04-09  3:27 ` Xunlei Pang
  2015-04-09 13:43 ` [PATCH v5 1/3] lib/plist: Provide plist_add_head() for nodes with the same prio Steven Rostedt
  2 siblings, 0 replies; 5+ messages in thread
From: Xunlei Pang @ 2015-04-09  3:27 UTC (permalink / raw)
  To: linux-kernel
  Cc: Peter Zijlstra, Steven Rostedt, Juri Lelli, Dan Streetman, Xunlei Pang

From: Xunlei Pang <pang.xunlei@linaro.org>

We may suffer from extra rt overload rq due to the affinity,
so when the affinity of any runnable rt task is changed, we
should check to trigger balancing, otherwise it will cause
some unnecessary delayed real-time response. Unfortunately,
current RT global scheduler doesn't trigger anything.

For example: a 2-cpu system with two runnable FIFO tasks(same
rt_priority) bound on CPU0, let's name them rt1(running) and
rt2(runnable) respectively; CPU1 has no RTs. Then, someone sets
the affinity of rt2 to 0x3(i.e. CPU0 and CPU1), but after this,
rt2 still can't be scheduled until rt1 enters schedule(); this
definitely causes significant response latency for rt2.

So, when doing set_cpus_allowed_rt(), if detecting such cases,
check to trigger a push behaviour.

Signed-off-by: Xunlei Pang <pang.xunlei@linaro.org>
---
 kernel/sched/rt.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 71 insertions(+), 10 deletions(-)

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 402162a..9704ed3 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1445,10 +1445,9 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
 	return next;
 }
 
-static struct task_struct *_pick_next_task_rt(struct rq *rq)
+static struct task_struct *peek_next_task_rt(struct rq *rq)
 {
 	struct sched_rt_entity *rt_se;
-	struct task_struct *p;
 	struct rt_rq *rt_rq  = &rq->rt;
 
 	do {
@@ -1457,7 +1456,14 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 		rt_rq = group_rt_rq(rt_se);
 	} while (rt_rq);
 
-	p = rt_task_of(rt_se);
+	return rt_task_of(rt_se);
+}
+
+static inline struct task_struct *_pick_next_task_rt(struct rq *rq)
+{
+	struct task_struct *p;
+
+	p = peek_next_task_rt(rq);
 	p->se.exec_start = rq_clock_task(rq);
 
 	return p;
@@ -2077,28 +2083,77 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 				const struct cpumask *new_mask)
 {
 	struct rq *rq;
-	int weight;
+	int old_weight, new_weight;
+	int preempt_push = 0, direct_push = 0;
 
 	BUG_ON(!rt_task(p));
 
 	if (!task_on_rq_queued(p))
 		return;
 
-	weight = cpumask_weight(new_mask);
+	old_weight = p->nr_cpus_allowed;
+	new_weight = cpumask_weight(new_mask);
 
+	rq = task_rq(p);
+
+	if (new_weight > 1 &&
+	    rt_task(rq->curr) &&
+	    rq->rt.rt_nr_total > 1 &&
+	    !test_tsk_need_resched(rq->curr)) {
+		/*
+		 * We own p->pi_lock and rq->lock. rq->lock might
+		 * get released when doing direct pushing, however
+		 * p->pi_lock is always held, so it's safe to assign
+		 * new_mask and new_weight to p below.
+		 */
+		if (!task_running(rq, p)) {
+			cpumask_copy(&p->cpus_allowed, new_mask);
+			p->nr_cpus_allowed = new_weight;
+			direct_push = 1;
+		} else if (cpumask_test_cpu(task_cpu(p), new_mask)) {
+			struct task_struct *next;
+
+			cpumask_copy(&p->cpus_allowed, new_mask);
+			p->nr_cpus_allowed = new_weight;
+			if (!cpupri_find(&rq->rd->cpupri, p, NULL))
+				goto update;
+
+			/*
+			 * At this point, current task gets migratable most
+			 * likely due to the change of its affinity, let's
+			 * figure out if we can migrate it.
+			 *
+			 * Can we find any task with the same priority as
+			 * current? To accomplish this, firstly we requeue
+			 * current to the tail and peek next, then restore
+			 * current to the head.
+			 */
+			requeue_task_rt(rq, p, 0);
+			next = peek_next_task_rt(rq);
+			requeue_task_rt(rq, p, 1);
+			if (next != p && next->prio == p->prio) {
+				/*
+				 * Target found, so let's reschedule to try
+				 * and push current away.
+				 */
+				requeue_task_rt(rq, next, 1);
+				preempt_push = 1;
+			}
+		}
+	}
+
+update:
 	/*
 	 * Only update if the process changes its state from whether it
 	 * can migrate or not.
 	 */
-	if ((p->nr_cpus_allowed > 1) == (weight > 1))
-		return;
-
-	rq = task_rq(p);
+	if ((old_weight > 1) == (new_weight > 1))
+		goto out;
 
 	/*
 	 * The process used to be able to migrate OR it can now migrate
 	 */
-	if (weight <= 1) {
+	if (new_weight <= 1) {
 		if (!task_current(rq, p))
 			dequeue_pushable_task(rq, p);
 		BUG_ON(!rq->rt.rt_nr_migratory);
@@ -2110,6 +2165,12 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 	}
 
 	update_rt_migration(&rq->rt);
+
+out:
+	if (direct_push)
+		push_rt_tasks(rq);
+	else if (preempt_push)
+		resched_curr(rq);
 }
 
 /* Assumes rq->lock is held */
-- 
1.9.1



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH v5 1/3] lib/plist: Provide plist_add_head() for nodes with the same prio
  2015-04-09  3:27 [PATCH v5 1/3] lib/plist: Provide plist_add_head() for nodes with the same prio Xunlei Pang
  2015-04-09  3:27 ` [PATCH v5 2/3] sched/rt: Fix wrong SMP scheduler behavior for equal prio cases Xunlei Pang
  2015-04-09  3:27 ` [PATCH v5 3/3] sched/rt: Check to push the task when changing its affinity Xunlei Pang
@ 2015-04-09 13:43 ` Steven Rostedt
  2 siblings, 0 replies; 5+ messages in thread
From: Steven Rostedt @ 2015-04-09 13:43 UTC (permalink / raw)
  To: Xunlei Pang
  Cc: linux-kernel, Peter Zijlstra, Juri Lelli, Dan Streetman, Xunlei Pang

On Thu,  9 Apr 2015 11:27:16 +0800
Xunlei Pang <xlpang@126.com> wrote:

> From: Xunlei Pang <pang.xunlei@linaro.org>
> 
> If there're multiple nodes with the same prio as @node, currently
> plist_add() will add @node behind all of them. Now we need to add
> @node before all of these nodes for SMP RT scheduler.
> 
> This patch adds a common __plist_add() for adding @node before or
> after existing nodes with the same prio, then adds plist_add_head()
> and plist_add_tail() inline wrapper functions for convenient uses.
> 
> Finally, define plist_add() as plist_add_tail() which has the same
> behaviour as before.
> 
> Reviewed-by: Dan Streetman <ddstreet@ieee.org>
> Signed-off-by: Xunlei Pang <pang.xunlei@linaro.org>
> ---
>  include/linux/plist.h | 30 +++++++++++++++++++++++++++++-
>  lib/plist.c           | 22 +++++++++++++++++++---
>  2 files changed, 48 insertions(+), 4 deletions(-)
> 
> diff --git a/include/linux/plist.h b/include/linux/plist.h
> index 9788360..10c834c 100644
> --- a/include/linux/plist.h
> +++ b/include/linux/plist.h
> @@ -138,7 +138,35 @@ static inline void plist_node_init(struct plist_node *node, int prio)
>  	INIT_LIST_HEAD(&node->node_list);
>  }
>  
> -extern void plist_add(struct plist_node *node, struct plist_head *head);
> +extern void __plist_add(struct plist_node *node,
> +				struct plist_head *head, bool is_head);
> +
> +/**
> + * plist_add_head - add @node to @head, before all existing same-prio nodes
> + *
> + * @node:	&struct plist_node pointer
> + * @head:	&struct plist_head pointer

I just noticed that the plist kerneldoc is really pathetic. Not your
fault, but let's not continue the trend. The above descriptions of the
@node and @head are really lame. It adds nothing to the prototype. It's
equivalent to:

	/* increment i */
	i++;

Something like this is better:

 * @node:  The plist_node to be added to @head
 * @head:  The plist_head that @node is being added to

The oneline description you have above is good. Ideally, we should have
a bit more detailed description here too. But for now we can leave it.
The rest of the @node and @head's should be fixed too.


> + */
> +static inline
> +void plist_add_head(struct plist_node *node, struct plist_head *head)
> +{
> +	__plist_add(node, head, true);
> +}
> +
> +/**
> + * plist_add_tail - add @node to @head, after all existing same-prio nodes
> + *
> + * @node:	&struct plist_node pointer
> + * @head:	&struct plist_head pointer
> + */
> +static inline
> +void plist_add_tail(struct plist_node *node, struct plist_head *head)
> +{
> +	__plist_add(node, head, false);
> +}
> +
> +#define plist_add plist_add_tail
> +
>  extern void plist_del(struct plist_node *node, struct plist_head *head);
>  
>  extern void plist_requeue(struct plist_node *node, struct plist_head *head);
> diff --git a/lib/plist.c b/lib/plist.c
> index 3a30c53..9f396f1 100644
> --- a/lib/plist.c
> +++ b/lib/plist.c
> @@ -66,12 +66,16 @@ static void plist_check_head(struct plist_head *head)
>  #endif
>  
>  /**
> - * plist_add - add @node to @head
> + * __plist_add - add @node to @head
>   *
>   * @node:	&struct plist_node pointer
>   * @head:	&struct plist_head pointer
> + * @is_head:	bool

As I said above. This horrible kerneldoc was there before and is not
your fault. But if you are going to touch it, let's fix it up first.

 * @is_head: True if adding to head of prio list, false otherwise


> + *
> + * If there're any nodes with the same prio, add @node
> + * behind or before all of them according to @is_head.

The above is very lacking if I want to use this, or know how to use it.
It does not tell me what @is_head must be to add before or behind them.

 * For nodes of the same prio, @node will be added at the head of
 * previously added nodes if @is_head is true, or it will be added
 * at the tail of previously added nodes if @is_head is false.

That will be much more descriptive for people looking at this code for
the first time.

The rest of the code looks fine.

-- Steve

>   */
> -void plist_add(struct plist_node *node, struct plist_head *head)
> +void __plist_add(struct plist_node *node, struct plist_head *head, bool is_head)
>  {
>  	struct plist_node *first, *iter, *prev = NULL;
>  	struct list_head *node_next = &head->node_list;
> @@ -96,8 +100,20 @@ void plist_add(struct plist_node *node, struct plist_head *head)
>  				struct plist_node, prio_list);
>  	} while (iter != first);
>  
> -	if (!prev || prev->prio != node->prio)
> +	if (!prev || prev->prio != node->prio) {
>  		list_add_tail(&node->prio_list, &iter->prio_list);
> +	} else if (is_head) {
> +		/*
> +		 * prev has the same priority as the node that is being
> +		 * added. It is also the first node for this priority,
> +		 * but the new node needs to be added ahead of it.
> +		 * To accomplish this, replace prev in the prio_list
> +		 * with node. Then set node_next to prev->node_list so
> +		 * that the new node gets added before prev and not iter.
> +		 */
> +		list_replace_init(&prev->prio_list, &node->prio_list);
> +		node_next = &prev->node_list;
> +	}
>  ins_node:
>  	list_add_tail(&node->node_list, node_next);
>  


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v5 2/3] sched/rt: Fix wrong SMP scheduler behavior for equal prio cases
  2015-04-09  3:27 ` [PATCH v5 2/3] sched/rt: Fix wrong SMP scheduler behavior for equal prio cases Xunlei Pang
@ 2015-04-09 13:56   ` Steven Rostedt
  0 siblings, 0 replies; 5+ messages in thread
From: Steven Rostedt @ 2015-04-09 13:56 UTC (permalink / raw)
  To: Xunlei Pang
  Cc: linux-kernel, Peter Zijlstra, Juri Lelli, Dan Streetman, Xunlei Pang

On Thu,  9 Apr 2015 11:27:17 +0800
Xunlei Pang <xlpang@126.com> wrote:


> Suggested-by: Steven Rostedt <rostedt@goodmis.org>
> Signed-off-by: Xunlei Pang <pang.xunlei@linaro.org>
> ---
>  kernel/sched/rt.c | 26 ++++++++++++++++++++------
>  1 file changed, 20 insertions(+), 6 deletions(-)
> 
> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> index 575da76..402162a 100644
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -359,11 +359,15 @@ static inline void set_post_schedule(struct rq *rq)
>  	rq->post_schedule = has_pushable_tasks(rq);
>  }
>  
> -static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
> +static void
> +enqueue_pushable_task(struct rq *rq, struct task_struct *p, bool head)
>  {
>  	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
>  	plist_node_init(&p->pushable_tasks, p->prio);
> -	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
> +	if (head)
> +		plist_add_head(&p->pushable_tasks, &rq->rt.pushable_tasks);
> +	else
> +		plist_add_tail(&p->pushable_tasks, &rq->rt.pushable_tasks);
>  
>  	/* Update the highest prio pushable task */
>  	if (p->prio < rq->rt.highest_prio.next)
> @@ -385,7 +389,8 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
>  
>  #else
>  
> -static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
> +static inline
> +void enqueue_pushable_task(struct rq *rq, struct task_struct *p, bool head)
>  {
>  }
>  
> @@ -1260,7 +1265,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
>  	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
>  
>  	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
> -		enqueue_pushable_task(rq, p);
> +		enqueue_pushable_task(rq, p, false);

Hmm, I really don't like the "false" parameter all over the place, since
it's only needed in one place. Thinking about this more, what about
keeping enqueue_pushable_task() as is, and adding an
enqueue_pushable_task_preempted(). Having something like this:

static inline void
enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	enqueue_pushable_task_preempted(rq, p, false);
}


>  }
>  
>  static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
> @@ -1507,7 +1512,16 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
>  	 * if it is still active
>  	 */
>  	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
> -		enqueue_pushable_task(rq, p);
> +		/*
> +		 * put_prev_task_rt() is called by many functions,
> +		 * pick_next_task_rt() is the only one may have
> +		 * PREEMPT_ACTIVE set. So if detecting p(current
> +		 * task) is preempted in such case, we should
> +		 * enqueue it to the front of the pushable plist,
> +		 * as there may be multiple tasks with the same
> +		 * priority as p.
> +		 */
> +		enqueue_pushable_task(rq, p, !!(preempt_count() & PREEMPT_ACTIVE));

Then we don't need to touch any of the code but this place, and this
would be:

		enqueue_pushable_task_preempted(rq, p,
			 !!(preempt_count() & PREEMPT_ACTIVE));

I'm thinking this would be much more descriptive.

What do you think?

-- Steve

>  }
>  
>  #ifdef CONFIG_SMP
> @@ -2091,7 +2105,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
>  		rq->rt.rt_nr_migratory--;
>  	} else {
>  		if (!task_current(rq, p))
> -			enqueue_pushable_task(rq, p);
> +			enqueue_pushable_task(rq, p, false);
>  		rq->rt.rt_nr_migratory++;
>  	}
>  


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2015-04-09 13:56 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-04-09  3:27 [PATCH v5 1/3] lib/plist: Provide plist_add_head() for nodes with the same prio Xunlei Pang
2015-04-09  3:27 ` [PATCH v5 2/3] sched/rt: Fix wrong SMP scheduler behavior for equal prio cases Xunlei Pang
2015-04-09 13:56   ` Steven Rostedt
2015-04-09  3:27 ` [PATCH v5 3/3] sched/rt: Check to push the task when changing its affinity Xunlei Pang
2015-04-09 13:43 ` [PATCH v5 1/3] lib/plist: Provide plist_add_head() for nodes with the same prio Steven Rostedt

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.