All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] sched/deadline: fix earliest_dl.next is not the pushable candidate
@ 2015-10-22  7:30 Wanpeng Li
  2015-11-01 22:18 ` Wanpeng Li
  2015-11-09  9:44 ` Juri Lelli
  0 siblings, 2 replies; 3+ messages in thread
From: Wanpeng Li @ 2015-10-22  7:30 UTC (permalink / raw)
  To: Ingo Molnar, Peter Zijlstra; +Cc: Juri Lelli, linux-kernel, Wanpeng Li

earliest_dl.next is used to cache the next earliest ready task 
which is also pushable, so that it can serve as a candidate 
during the pull algorithm. If the earliest_dl.next deadline 
of the src_rq is earlier than the earliest_dl.curr deadline of 
the current rq, a task from the src_rq can be pulled. However, 
the current implementation logic of earliest_dl.next only 
guarantees that it is the next earliest ready task, not the next 
earliest pushable task; because of affinity, the pull algorithm 
can end up holding both rqs' locks yet finding nothing to preempt 
our current task. 
In addition, the current logic doesn't update the next candidate for 
pushing in pick_next_task_dl() even if the running task is never 
eligible.

This patch fixes it by updating earliest_dl.next when a pushable dl task 
is enqueued/dequeued, the same way as the rt class does.

Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
---
 kernel/sched/deadline.c | 58 ++++++-------------------------------------------
 1 file changed, 7 insertions(+), 51 deletions(-)

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 142df26..6175c94 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -87,6 +87,8 @@ void init_dl_rq(struct dl_rq *dl_rq)
 
 #ifdef CONFIG_SMP
 
+static struct task_struct *pick_next_pushable_dl_task(struct rq *rq);
+
 static inline int dl_overloaded(struct rq *rq)
 {
 	return atomic_read(&rq->rd->dlo_count);
@@ -181,6 +183,9 @@ static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 
 	rb_link_node(&p->pushable_dl_tasks, parent, link);
 	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
+
+	if (dl_time_before(p->dl.deadline, dl_rq->earliest_dl.next))
+		dl_rq->earliest_dl.next = p->dl.deadline;
 }
 
 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
@@ -199,6 +204,8 @@ static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 
 	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
+
+	dl_rq->earliest_dl.next = pick_next_pushable_dl_task(rq)->dl.deadline;
 }
 
 static inline int has_pushable_dl_tasks(struct rq *rq)
@@ -775,42 +782,14 @@ static void update_curr_dl(struct rq *rq)
 
 #ifdef CONFIG_SMP
 
-static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
-
-static inline u64 next_deadline(struct rq *rq)
-{
-	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
-
-	if (next && dl_prio(next->prio))
-		return next->dl.deadline;
-	else
-		return 0;
-}
-
 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 {
 	struct rq *rq = rq_of_dl_rq(dl_rq);
 
 	if (dl_rq->earliest_dl.curr == 0 ||
 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
-		/*
-		 * If the dl_rq had no -deadline tasks, or if the new task
-		 * has shorter deadline than the current one on dl_rq, we
-		 * know that the previous earliest becomes our next earliest,
-		 * as the new task becomes the earliest itself.
-		 */
-		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
 		dl_rq->earliest_dl.curr = deadline;
 		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
-	} else if (dl_rq->earliest_dl.next == 0 ||
-		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
-		/*
-		 * On the other hand, if the new -deadline task has a
-		 * a later deadline than the earliest one on dl_rq, but
-		 * it is earlier than the next (if any), we must
-		 * recompute the next-earliest.
-		 */
-		dl_rq->earliest_dl.next = next_deadline(rq);
 	}
 }
 
@@ -832,7 +811,6 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 
 		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
 		dl_rq->earliest_dl.curr = entry->deadline;
-		dl_rq->earliest_dl.next = next_deadline(rq);
 		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
 	}
 }
@@ -1267,28 +1245,6 @@ static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 	return 0;
 }
 
-/* Returns the second earliest -deadline task, NULL otherwise */
-static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
-{
-	struct rb_node *next_node = rq->dl.rb_leftmost;
-	struct sched_dl_entity *dl_se;
-	struct task_struct *p = NULL;
-
-next_node:
-	next_node = rb_next(next_node);
-	if (next_node) {
-		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
-		p = dl_task_of(dl_se);
-
-		if (pick_dl_task(rq, p, cpu))
-			return p;
-
-		goto next_node;
-	}
-
-	return NULL;
-}
-
 /*
  * Return the earliest pushable rq's task, which is suitable to be executed
  * on the CPU, NULL otherwise:
-- 
1.9.1


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH] sched/deadline: fix earliest_dl.next is not the pushable candidate
  2015-10-22  7:30 [PATCH] sched/deadline: fix earliest_dl.next is not the pushable candidate Wanpeng Li
@ 2015-11-01 22:18 ` Wanpeng Li
  2015-11-09  9:44 ` Juri Lelli
  1 sibling, 0 replies; 3+ messages in thread
From: Wanpeng Li @ 2015-11-01 22:18 UTC (permalink / raw)
  To: Ingo Molnar, Peter Zijlstra; +Cc: Juri Lelli, linux-kernel

Ping Juri, :-)
On 10/22/15 3:30 PM, Wanpeng Li wrote:
> earliest_dl.next is used to cache the next earliest ready task
> which is also pushable, so that it can serve as a candidate
> during the pull algorithm. If the earliest_dl.next deadline
> of the src_rq is earlier than the earliest_dl.curr deadline
> of the current rq, a task from the src_rq can be pulled. However,
> the current implementation logic of earliest_dl.next only
> guarantees that it is the next earliest ready task, not the next
> earliest pushable task; because of affinity, the pull algorithm
> can end up holding both rqs' locks yet finding nothing to preempt
> our current task.
> In addition, the current logic doesn't update the next candidate for
> pushing in pick_next_task_dl() even if the running task is never
> eligible.
>
> This patch fixes it by updating earliest_dl.next when a pushable dl task
> is enqueued/dequeued, the same way as the rt class does.
>
> Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
> ---
>   kernel/sched/deadline.c | 58 ++++++-------------------------------------------
>   1 file changed, 7 insertions(+), 51 deletions(-)
>
> diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
> index 142df26..6175c94 100644
> --- a/kernel/sched/deadline.c
> +++ b/kernel/sched/deadline.c
> @@ -87,6 +87,8 @@ void init_dl_rq(struct dl_rq *dl_rq)
>   
>   #ifdef CONFIG_SMP
>   
> +static struct task_struct *pick_next_pushable_dl_task(struct rq *rq);
> +
>   static inline int dl_overloaded(struct rq *rq)
>   {
>   	return atomic_read(&rq->rd->dlo_count);
> @@ -181,6 +183,9 @@ static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
>   
>   	rb_link_node(&p->pushable_dl_tasks, parent, link);
>   	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
> +
> +	if (dl_time_before(p->dl.deadline, dl_rq->earliest_dl.next))
> +		dl_rq->earliest_dl.next = p->dl.deadline;
>   }
>   
>   static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
> @@ -199,6 +204,8 @@ static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
>   
>   	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
>   	RB_CLEAR_NODE(&p->pushable_dl_tasks);
> +
> +	dl_rq->earliest_dl.next = pick_next_pushable_dl_task(rq)->dl.deadline;
>   }
>   
>   static inline int has_pushable_dl_tasks(struct rq *rq)
> @@ -775,42 +782,14 @@ static void update_curr_dl(struct rq *rq)
>   
>   #ifdef CONFIG_SMP
>   
> -static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
> -
> -static inline u64 next_deadline(struct rq *rq)
> -{
> -	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
> -
> -	if (next && dl_prio(next->prio))
> -		return next->dl.deadline;
> -	else
> -		return 0;
> -}
> -
>   static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
>   {
>   	struct rq *rq = rq_of_dl_rq(dl_rq);
>   
>   	if (dl_rq->earliest_dl.curr == 0 ||
>   	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
> -		/*
> -		 * If the dl_rq had no -deadline tasks, or if the new task
> -		 * has shorter deadline than the current one on dl_rq, we
> -		 * know that the previous earliest becomes our next earliest,
> -		 * as the new task becomes the earliest itself.
> -		 */
> -		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
>   		dl_rq->earliest_dl.curr = deadline;
>   		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
> -	} else if (dl_rq->earliest_dl.next == 0 ||
> -		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
> -		/*
> -		 * On the other hand, if the new -deadline task has a
> -		 * a later deadline than the earliest one on dl_rq, but
> -		 * it is earlier than the next (if any), we must
> -		 * recompute the next-earliest.
> -		 */
> -		dl_rq->earliest_dl.next = next_deadline(rq);
>   	}
>   }
>   
> @@ -832,7 +811,6 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
>   
>   		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
>   		dl_rq->earliest_dl.curr = entry->deadline;
> -		dl_rq->earliest_dl.next = next_deadline(rq);
>   		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
>   	}
>   }
> @@ -1267,28 +1245,6 @@ static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
>   	return 0;
>   }
>   
> -/* Returns the second earliest -deadline task, NULL otherwise */
> -static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
> -{
> -	struct rb_node *next_node = rq->dl.rb_leftmost;
> -	struct sched_dl_entity *dl_se;
> -	struct task_struct *p = NULL;
> -
> -next_node:
> -	next_node = rb_next(next_node);
> -	if (next_node) {
> -		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
> -		p = dl_task_of(dl_se);
> -
> -		if (pick_dl_task(rq, p, cpu))
> -			return p;
> -
> -		goto next_node;
> -	}
> -
> -	return NULL;
> -}
> -
>   /*
>    * Return the earliest pushable rq's task, which is suitable to be executed
>    * on the CPU, NULL otherwise:


^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] sched/deadline: fix earliest_dl.next is not the pushable candidate
  2015-10-22  7:30 [PATCH] sched/deadline: fix earliest_dl.next is not the pushable candidate Wanpeng Li
  2015-11-01 22:18 ` Wanpeng Li
@ 2015-11-09  9:44 ` Juri Lelli
  1 sibling, 0 replies; 3+ messages in thread
From: Juri Lelli @ 2015-11-09  9:44 UTC (permalink / raw)
  To: Wanpeng Li; +Cc: Ingo Molnar, Peter Zijlstra, linux-kernel

Hi,

On 10/22/15, Wanpeng Li wrote:
> earliest_dl.next is used to cache the next earliest ready task 
> which is also pushable, so that it can serve as a candidate 
> during the pull algorithm. If the earliest_dl.next deadline 
> of the src_rq is earlier than the earliest_dl.curr deadline of 
> the current rq, a task from the src_rq can be pulled. However, 
> the current implementation logic of earliest_dl.next only 
> guarantees that it is the next earliest ready task, not the next 
> earliest pushable task; because of affinity, the pull algorithm 
> can end up holding both rqs' locks yet finding nothing to preempt 
> our current task. 
> In addition, the current logic doesn't update the next candidate for 
> pushing in pick_next_task_dl() even if the running task is never 
> eligible.
> 
> This patch fixes it by updating earliest_dl.next when a pushable dl task 
> is enqueued/dequeued, the same way as the rt class does.
> 
> Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
> ---
>  kernel/sched/deadline.c | 58 ++++++-------------------------------------------
>  1 file changed, 7 insertions(+), 51 deletions(-)
> 
> diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
> index 142df26..6175c94 100644
> --- a/kernel/sched/deadline.c
> +++ b/kernel/sched/deadline.c
> @@ -87,6 +87,8 @@ void init_dl_rq(struct dl_rq *dl_rq)
>  
>  #ifdef CONFIG_SMP
>  
> +static struct task_struct *pick_next_pushable_dl_task(struct rq *rq);
> +
>  static inline int dl_overloaded(struct rq *rq)
>  {
>  	return atomic_read(&rq->rd->dlo_count);
> @@ -181,6 +183,9 @@ static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
>  
>  	rb_link_node(&p->pushable_dl_tasks, parent, link);
>  	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
> +
> +	if (dl_time_before(p->dl.deadline, dl_rq->earliest_dl.next))
> +		dl_rq->earliest_dl.next = p->dl.deadline;
>  }
>  
>  static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
> @@ -199,6 +204,8 @@ static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
>  
>  	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
>  	RB_CLEAR_NODE(&p->pushable_dl_tasks);
> +
> +	dl_rq->earliest_dl.next = pick_next_pushable_dl_task(rq)->dl.deadline;

NULL pointer dereference if rq doesn't have pushable tasks.

Thanks,

- Juri

>  }
>  
>  static inline int has_pushable_dl_tasks(struct rq *rq)
> @@ -775,42 +782,14 @@ static void update_curr_dl(struct rq *rq)
>  
>  #ifdef CONFIG_SMP
>  
> -static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
> -
> -static inline u64 next_deadline(struct rq *rq)
> -{
> -	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
> -
> -	if (next && dl_prio(next->prio))
> -		return next->dl.deadline;
> -	else
> -		return 0;
> -}
> -
>  static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
>  {
>  	struct rq *rq = rq_of_dl_rq(dl_rq);
>  
>  	if (dl_rq->earliest_dl.curr == 0 ||
>  	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
> -		/*
> -		 * If the dl_rq had no -deadline tasks, or if the new task
> -		 * has shorter deadline than the current one on dl_rq, we
> -		 * know that the previous earliest becomes our next earliest,
> -		 * as the new task becomes the earliest itself.
> -		 */
> -		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
>  		dl_rq->earliest_dl.curr = deadline;
>  		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
> -	} else if (dl_rq->earliest_dl.next == 0 ||
> -		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
> -		/*
> -		 * On the other hand, if the new -deadline task has a
> -		 * a later deadline than the earliest one on dl_rq, but
> -		 * it is earlier than the next (if any), we must
> -		 * recompute the next-earliest.
> -		 */
> -		dl_rq->earliest_dl.next = next_deadline(rq);
>  	}
>  }
>  
> @@ -832,7 +811,6 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
>  
>  		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
>  		dl_rq->earliest_dl.curr = entry->deadline;
> -		dl_rq->earliest_dl.next = next_deadline(rq);
>  		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
>  	}
>  }
> @@ -1267,28 +1245,6 @@ static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
>  	return 0;
>  }
>  
> -/* Returns the second earliest -deadline task, NULL otherwise */
> -static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
> -{
> -	struct rb_node *next_node = rq->dl.rb_leftmost;
> -	struct sched_dl_entity *dl_se;
> -	struct task_struct *p = NULL;
> -
> -next_node:
> -	next_node = rb_next(next_node);
> -	if (next_node) {
> -		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
> -		p = dl_task_of(dl_se);
> -
> -		if (pick_dl_task(rq, p, cpu))
> -			return p;
> -
> -		goto next_node;
> -	}
> -
> -	return NULL;
> -}
> -
>  /*
>   * Return the earliest pushable rq's task, which is suitable to be executed
>   * on the CPU, NULL otherwise:
> -- 
> 1.9.1
> 
> 


^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2015-11-09  9:44 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-10-22  7:30 [PATCH] sched/deadline: fix earliest_dl.next is not the pushable candidate Wanpeng Li
2015-11-01 22:18 ` Wanpeng Li
2015-11-09  9:44 ` Juri Lelli

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.