All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running
@ 2014-09-12 11:03 Kirill Tkhai
  2014-09-14  9:13 ` Peter Zijlstra
                   ` (2 more replies)
  0 siblings, 3 replies; 10+ messages in thread
From: Kirill Tkhai @ 2014-09-12 11:03 UTC (permalink / raw)
  To: linux-kernel; +Cc: Peter Zijlstra, Ingo Molnar, tkhai


If a task is queued but not running on its rq, we can simply migrate
it without the migration thread and without switching of context.

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
---
 kernel/sched/core.c |   47 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 32 insertions(+), 15 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d4399b4..dbbba26 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4594,6 +4594,33 @@ void init_idle(struct task_struct *idle, int cpu)
 }
 
 #ifdef CONFIG_SMP
+/*
+ * move_queued_task - move a queued task to new rq.
+ *
+ * Returns (locked) new rq. Old rq's lock is released.
+ */
+static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
+{
+	struct rq *rq = task_rq(p);
+
+	lockdep_assert_held(&rq->lock);
+
+	dequeue_task(rq, p, 0);
+	p->on_rq = TASK_ON_RQ_MIGRATING;
+	set_task_cpu(p, new_cpu);
+	raw_spin_unlock(&rq->lock);
+
+	rq = cpu_rq(new_cpu);
+
+	raw_spin_lock(&rq->lock);
+	BUG_ON(task_cpu(p) != new_cpu);
+	p->on_rq = TASK_ON_RQ_QUEUED;
+	enqueue_task(rq, p, 0);
+	check_preempt_curr(rq, p, 0);
+
+	return rq;
+}
+
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
 	if (p->sched_class && p->sched_class->set_cpus_allowed)
@@ -4650,14 +4677,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (task_on_rq_queued(p) || p->state == TASK_WAKING) {
+	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, p, &flags);
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 		tlb_migrate_finish(p->mm);
 		return 0;
-	}
+	} else if (task_on_rq_queued(p))
+		rq = move_queued_task(p, dest_cpu);
 out:
 	task_rq_unlock(rq, p, &flags);
 
@@ -4700,19 +4728,8 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * If we're not on a rq, the next wake-up will ensure we're
 	 * placed properly.
 	 */
-	if (task_on_rq_queued(p)) {
-		dequeue_task(rq, p, 0);
-		p->on_rq = TASK_ON_RQ_MIGRATING;
-		set_task_cpu(p, dest_cpu);
-		raw_spin_unlock(&rq->lock);
-
-		rq = cpu_rq(dest_cpu);
-		raw_spin_lock(&rq->lock);
-		BUG_ON(task_rq(p) != rq);
-		p->on_rq = TASK_ON_RQ_QUEUED;
-		enqueue_task(rq, p, 0);
-		check_preempt_curr(rq, p, 0);
-	}
+	if (task_on_rq_queued(p))
+		rq = move_queued_task(p, dest_cpu);
 done:
 	ret = 1;
 fail:




^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH] sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running
  2014-09-12 11:03 [PATCH] sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running Kirill Tkhai
@ 2014-09-14  9:13 ` Peter Zijlstra
  2014-09-14  9:20   ` Kirill Tkhai
  2014-09-15  9:48 ` Preeti Murthy
  2014-09-19 11:46 ` [tip:sched/core] " tip-bot for Kirill Tkhai
  2 siblings, 1 reply; 10+ messages in thread
From: Peter Zijlstra @ 2014-09-14  9:13 UTC (permalink / raw)
  To: Kirill Tkhai; +Cc: linux-kernel, Ingo Molnar, tkhai

On Fri, Sep 12, 2014 at 03:03:34PM +0400, Kirill Tkhai wrote:
> 
> If a task is queued but not running on it rq, we can simply migrate
> it without migration thread and switching of context.
> 
> Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
> ---
>  kernel/sched/core.c |   47 ++++++++++++++++++++++++++++++++---------------
>  1 file changed, 32 insertions(+), 15 deletions(-)
> 
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index d4399b4..dbbba26 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -4594,6 +4594,33 @@ void init_idle(struct task_struct *idle, int cpu)
>  }
>  
>  #ifdef CONFIG_SMP
> +/*
> + * move_queued_task - move a queued task to new rq.
> + *
> + * Returns (locked) new rq. Old rq's lock is released.
> + */
> +static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
> +{
> +	struct rq *rq = task_rq(p);
> +
> +	lockdep_assert_held(&rq->lock);
> +
> +	dequeue_task(rq, p, 0);
> +	p->on_rq = TASK_ON_RQ_MIGRATING;
> +	set_task_cpu(p, new_cpu);
> +	raw_spin_unlock(&rq->lock);
> +
> +	rq = cpu_rq(new_cpu);
> +
> +	raw_spin_lock(&rq->lock);
> +	BUG_ON(task_cpu(p) != new_cpu);
> +	p->on_rq = TASK_ON_RQ_QUEUED;
> +	enqueue_task(rq, p, 0);
> +	check_preempt_curr(rq, p, 0);
> +
> +	return rq;
> +}

We have almost that exact code sequence in __migrate_task() could we
share some?

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running
  2014-09-14  9:13 ` Peter Zijlstra
@ 2014-09-14  9:20   ` Kirill Tkhai
  2014-09-14  9:55     ` Peter Zijlstra
  0 siblings, 1 reply; 10+ messages in thread
From: Kirill Tkhai @ 2014-09-14  9:20 UTC (permalink / raw)
  To: Peter Zijlstra, Kirill Tkhai; +Cc: linux-kernel, Ingo Molnar

On 14.09.2014 13:13, Peter Zijlstra wrote:
> On Fri, Sep 12, 2014 at 03:03:34PM +0400, Kirill Tkhai wrote:
>>
>> If a task is queued but not running on it rq, we can simply migrate
>> it without migration thread and switching of context.
>>
>> Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
>> ---
>>  kernel/sched/core.c |   47 ++++++++++++++++++++++++++++++++---------------
>>  1 file changed, 32 insertions(+), 15 deletions(-)
>>
>> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
>> index d4399b4..dbbba26 100644
>> --- a/kernel/sched/core.c
>> +++ b/kernel/sched/core.c
>> @@ -4594,6 +4594,33 @@ void init_idle(struct task_struct *idle, int cpu)
>>  }
>>  
>>  #ifdef CONFIG_SMP
>> +/*
>> + * move_queued_task - move a queued task to new rq.
>> + *
>> + * Returns (locked) new rq. Old rq's lock is released.
>> + */
>> +static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
>> +{
>> +	struct rq *rq = task_rq(p);
>> +
>> +	lockdep_assert_held(&rq->lock);
>> +
>> +	dequeue_task(rq, p, 0);
>> +	p->on_rq = TASK_ON_RQ_MIGRATING;
>> +	set_task_cpu(p, new_cpu);
>> +	raw_spin_unlock(&rq->lock);
>> +
>> +	rq = cpu_rq(new_cpu);
>> +
>> +	raw_spin_lock(&rq->lock);
>> +	BUG_ON(task_cpu(p) != new_cpu);
>> +	p->on_rq = TASK_ON_RQ_QUEUED;
>> +	enqueue_task(rq, p, 0);
>> +	check_preempt_curr(rq, p, 0);
>> +
>> +	return rq;
>> +}
> 
> We have almost that exact code sequence in __migrate_task() could we
> share some?
> 

Peter, haven't I moved the sequence from __migrate_task??

Rechecked again; yes, patch makes __migrate_task using new move_queued_task.

Kirill

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running
  2014-09-14  9:20   ` Kirill Tkhai
@ 2014-09-14  9:55     ` Peter Zijlstra
  2014-09-14  9:57       ` Kirill Tkhai
  0 siblings, 1 reply; 10+ messages in thread
From: Peter Zijlstra @ 2014-09-14  9:55 UTC (permalink / raw)
  To: Kirill Tkhai; +Cc: Kirill Tkhai, linux-kernel, Ingo Molnar

On Sun, Sep 14, 2014 at 01:20:32PM +0400, Kirill Tkhai wrote:
> On 14.09.2014 13:13, Peter Zijlstra wrote:
> Peter, haven't I moved the sequence from __migrate_task??
> 
> Rechecked again; yes, patch makes __migrate_task using new move_queued_task.

Yes you have ... urgh.. that's what you get for traveling 9 timezones
and trying to do 'work' at 3 am which is really noon.

Lemme try and get my head on straight and have another look.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running
  2014-09-14  9:55     ` Peter Zijlstra
@ 2014-09-14  9:57       ` Kirill Tkhai
  0 siblings, 0 replies; 10+ messages in thread
From: Kirill Tkhai @ 2014-09-14  9:57 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: Kirill Tkhai, linux-kernel, Ingo Molnar

On 14.09.2014 13:55, Peter Zijlstra wrote:
> On Sun, Sep 14, 2014 at 01:20:32PM +0400, Kirill Tkhai wrote:
>> On 14.09.2014 13:13, Peter Zijlstra wrote:
>> Peter, haven't I moved the sequence from __migrate_task??
>>
>> Rechecked again; yes, patch makes __migrate_task using new move_queued_task.
> 
> Yes you have ... urgh.. that's what you get for traveling 9 timezones
> and trying to do 'work' at 3 am which is really noon.
> 
> Lemme try and get my head on straight and have another look.
> 

Oh, no problem :) Happy Programmers Day!!!

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running
  2014-09-12 11:03 [PATCH] sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running Kirill Tkhai
  2014-09-14  9:13 ` Peter Zijlstra
@ 2014-09-15  9:48 ` Preeti Murthy
  2014-09-15 10:02   ` Kirill Tkhai
  2014-09-19 11:46 ` [tip:sched/core] " tip-bot for Kirill Tkhai
  2 siblings, 1 reply; 10+ messages in thread
From: Preeti Murthy @ 2014-09-15  9:48 UTC (permalink / raw)
  To: Kirill Tkhai
  Cc: LKML, Peter Zijlstra, Ingo Molnar, Kirill Tkhai, Preeti U Murthy

Hi Kirill,

Which tree is this patch based on? __migrate_task() does a
double_rq_lock/unlock() today in mainline, doesn't it? I don't
however see that in your patch.

Regards
Preeti U Murthy

On Fri, Sep 12, 2014 at 4:33 PM, Kirill Tkhai <ktkhai@parallels.com> wrote:
>
> If a task is queued but not running on it rq, we can simply migrate
> it without migration thread and switching of context.
>
> Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
> ---
>  kernel/sched/core.c |   47 ++++++++++++++++++++++++++++++++---------------
>  1 file changed, 32 insertions(+), 15 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index d4399b4..dbbba26 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -4594,6 +4594,33 @@ void init_idle(struct task_struct *idle, int cpu)
>  }
>
>  #ifdef CONFIG_SMP
> +/*
> + * move_queued_task - move a queued task to new rq.
> + *
> + * Returns (locked) new rq. Old rq's lock is released.
> + */
> +static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
> +{
> +       struct rq *rq = task_rq(p);
> +
> +       lockdep_assert_held(&rq->lock);
> +
> +       dequeue_task(rq, p, 0);
> +       p->on_rq = TASK_ON_RQ_MIGRATING;
> +       set_task_cpu(p, new_cpu);
> +       raw_spin_unlock(&rq->lock);
> +
> +       rq = cpu_rq(new_cpu);
> +
> +       raw_spin_lock(&rq->lock);
> +       BUG_ON(task_cpu(p) != new_cpu);
> +       p->on_rq = TASK_ON_RQ_QUEUED;
> +       enqueue_task(rq, p, 0);
> +       check_preempt_curr(rq, p, 0);
> +
> +       return rq;
> +}
> +
>  void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
>  {
>         if (p->sched_class && p->sched_class->set_cpus_allowed)
> @@ -4650,14 +4677,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
>                 goto out;
>
>         dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
> -       if (task_on_rq_queued(p) || p->state == TASK_WAKING) {
> +       if (task_running(rq, p) || p->state == TASK_WAKING) {
>                 struct migration_arg arg = { p, dest_cpu };
>                 /* Need help from migration thread: drop lock and wait. */
>                 task_rq_unlock(rq, p, &flags);
>                 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
>                 tlb_migrate_finish(p->mm);
>                 return 0;
> -       }
> +       } else if (task_on_rq_queued(p))
> +               rq = move_queued_task(p, dest_cpu);
>  out:
>         task_rq_unlock(rq, p, &flags);
>
> @@ -4700,19 +4728,8 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
>          * If we're not on a rq, the next wake-up will ensure we're
>          * placed properly.
>          */
> -       if (task_on_rq_queued(p)) {
> -               dequeue_task(rq, p, 0);
> -               p->on_rq = TASK_ON_RQ_MIGRATING;
> -               set_task_cpu(p, dest_cpu);
> -               raw_spin_unlock(&rq->lock);
> -
> -               rq = cpu_rq(dest_cpu);
> -               raw_spin_lock(&rq->lock);
> -               BUG_ON(task_rq(p) != rq);
> -               p->on_rq = TASK_ON_RQ_QUEUED;
> -               enqueue_task(rq, p, 0);
> -               check_preempt_curr(rq, p, 0);
> -       }
> +       if (task_on_rq_queued(p))
> +               rq = move_queued_task(p, dest_cpu);
>  done:
>         ret = 1;
>  fail:
>
>
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at  http://www.tux.org/lkml/

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running
  2014-09-15  9:48 ` Preeti Murthy
@ 2014-09-15 10:02   ` Kirill Tkhai
  2014-09-15 11:37     ` Peter Zijlstra
  2014-09-16 11:29     ` Preeti U Murthy
  0 siblings, 2 replies; 10+ messages in thread
From: Kirill Tkhai @ 2014-09-15 10:02 UTC (permalink / raw)
  To: Preeti Murthy
  Cc: LKML, Peter Zijlstra, Ingo Molnar, Kirill Tkhai, Preeti U Murthy

Hi, Preeti,

В Пн, 15/09/2014 в 15:18 +0530, Preeti Murthy пишет:
> Hi Kirill,
> 
> Which tree is this patch based on? __migrate_task() does a
> double_rq_lock/unlock() today in mainline, doesn't it? I don't
> however see that in your patch.

it's based on recent tip tree:
https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/

Also, once I received a build-robot message about my patch
makes warning on allyesconfig, and since that time I learned
about Peter's tree :) 

https://git.kernel.org/cgit/linux/kernel/git/peterz/queue.git

(This is for info, I don't know if it is right to make patches
based on in. But really good if you was away for some time
and you're interested in recent news without lkml archive reading.
I use it :)

Regards,
Kirill

> On Fri, Sep 12, 2014 at 4:33 PM, Kirill Tkhai <ktkhai@parallels.com> wrote:
> >
> > If a task is queued but not running on it rq, we can simply migrate
> > it without migration thread and switching of context.
> >
> > Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
> > ---
> >  kernel/sched/core.c |   47 ++++++++++++++++++++++++++++++++---------------
> >  1 file changed, 32 insertions(+), 15 deletions(-)
> >
> > diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> > index d4399b4..dbbba26 100644
> > --- a/kernel/sched/core.c
> > +++ b/kernel/sched/core.c
> > @@ -4594,6 +4594,33 @@ void init_idle(struct task_struct *idle, int cpu)
> >  }
> >
> >  #ifdef CONFIG_SMP
> > +/*
> > + * move_queued_task - move a queued task to new rq.
> > + *
> > + * Returns (locked) new rq. Old rq's lock is released.
> > + */
> > +static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
> > +{
> > +       struct rq *rq = task_rq(p);
> > +
> > +       lockdep_assert_held(&rq->lock);
> > +
> > +       dequeue_task(rq, p, 0);
> > +       p->on_rq = TASK_ON_RQ_MIGRATING;
> > +       set_task_cpu(p, new_cpu);
> > +       raw_spin_unlock(&rq->lock);
> > +
> > +       rq = cpu_rq(new_cpu);
> > +
> > +       raw_spin_lock(&rq->lock);
> > +       BUG_ON(task_cpu(p) != new_cpu);
> > +       p->on_rq = TASK_ON_RQ_QUEUED;
> > +       enqueue_task(rq, p, 0);
> > +       check_preempt_curr(rq, p, 0);
> > +
> > +       return rq;
> > +}
> > +
> >  void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
> >  {
> >         if (p->sched_class && p->sched_class->set_cpus_allowed)
> > @@ -4650,14 +4677,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
> >                 goto out;
> >
> >         dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
> > -       if (task_on_rq_queued(p) || p->state == TASK_WAKING) {
> > +       if (task_running(rq, p) || p->state == TASK_WAKING) {
> >                 struct migration_arg arg = { p, dest_cpu };
> >                 /* Need help from migration thread: drop lock and wait. */
> >                 task_rq_unlock(rq, p, &flags);
> >                 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
> >                 tlb_migrate_finish(p->mm);
> >                 return 0;
> > -       }
> > +       } else if (task_on_rq_queued(p))
> > +               rq = move_queued_task(p, dest_cpu);
> >  out:
> >         task_rq_unlock(rq, p, &flags);
> >
> > @@ -4700,19 +4728,8 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
> >          * If we're not on a rq, the next wake-up will ensure we're
> >          * placed properly.
> >          */
> > -       if (task_on_rq_queued(p)) {
> > -               dequeue_task(rq, p, 0);
> > -               p->on_rq = TASK_ON_RQ_MIGRATING;
> > -               set_task_cpu(p, dest_cpu);
> > -               raw_spin_unlock(&rq->lock);
> > -
> > -               rq = cpu_rq(dest_cpu);
> > -               raw_spin_lock(&rq->lock);
> > -               BUG_ON(task_rq(p) != rq);
> > -               p->on_rq = TASK_ON_RQ_QUEUED;
> > -               enqueue_task(rq, p, 0);
> > -               check_preempt_curr(rq, p, 0);
> > -       }
> > +       if (task_on_rq_queued(p))
> > +               rq = move_queued_task(p, dest_cpu);
> >  done:
> >         ret = 1;
> >  fail:
> >
> >
> >
> > --
> > To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> > the body of a message to majordomo@vger.kernel.org
> > More majordomo info at  http://vger.kernel.org/majordomo-info.html
> > Please read the FAQ at  http://www.tux.org/lkml/



^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running
  2014-09-15 10:02   ` Kirill Tkhai
@ 2014-09-15 11:37     ` Peter Zijlstra
  2014-09-16 11:29     ` Preeti U Murthy
  1 sibling, 0 replies; 10+ messages in thread
From: Peter Zijlstra @ 2014-09-15 11:37 UTC (permalink / raw)
  To: Kirill Tkhai
  Cc: Preeti Murthy, LKML, Ingo Molnar, Kirill Tkhai, Preeti U Murthy

On Mon, Sep 15, 2014 at 02:02:30PM +0400, Kirill Tkhai wrote:
> Hi, Preeti,
> 
> В Пн, 15/09/2014 в 15:18 +0530, Preeti Murthy пишет:
> > Hi Kirill,
> > 
> > Which tree is this patch based on? __migrate_task() does a
> > double_rq_lock/unlock() today in mainline, doesn't it? I don't
> > however see that in your patch.
> 
> it's based on recent tip tree:
> https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/
> 
> Also, once I received a build-robot message about my patch
> makes warning on allyesconfig, and since that time I learned
> about Peter's tree :) 
> 
> https://git.kernel.org/cgit/linux/kernel/git/peterz/queue.git
> 
> (This is for info, I don't know if it is right to make patches
> based on in. But really good if you was away for some time
> and you're interested in recent news without lkml archive reading.
> I use it :)

Its mostly a scratch tree to get some build coverage and to show people
what all is queued up in my quilt tree. The tree is fully recreated
every time I export my queue and at times I forget to export.

So feel free to look at, but basing work on it might not be the bestest
idea. Ideally things should find their way into -tip fairly quickly (or
get dropped on the ground if they come apart, but that usually goes
paired with an email telling you about the borkage).

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running
  2014-09-15 10:02   ` Kirill Tkhai
  2014-09-15 11:37     ` Peter Zijlstra
@ 2014-09-16 11:29     ` Preeti U Murthy
  1 sibling, 0 replies; 10+ messages in thread
From: Preeti U Murthy @ 2014-09-16 11:29 UTC (permalink / raw)
  To: Kirill Tkhai, preeti; +Cc: LKML, Peter Zijlstra, Ingo Molnar, Kirill Tkhai

On 09/15/2014 03:32 PM, Kirill Tkhai wrote:
> Hi, Preeti,
> 
> В Пн, 15/09/2014 в 15:18 +0530, Preeti Murthy пишет:
>> Hi Kirill,
>>
>> Which tree is this patch based on? __migrate_task() does a
>> double_rq_lock/unlock() today in mainline, doesn't it? I don't
>> however see that in your patch.
> 
> it's based on recent tip tree:
> https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/
> 
> Also, once I received a build-robot message about my patch
> makes warning on allyesconfig, and since that time I learned
> about Peter's tree :) 
> 
> https://git.kernel.org/cgit/linux/kernel/git/peterz/queue.git
> 
> (This is for info, I don't know if it is right to make patches
> based on in. But really good if you was away for some time
> and you're interested in recent news without lkml archive reading.
> I use it :)

Ok I checked the patch against tip tree and the patch looks good to me.

Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>

Thanks!

Regards
Preeti U Murthy
> 
> Regards,
> Kirill
> 
>> On Fri, Sep 12, 2014 at 4:33 PM, Kirill Tkhai <ktkhai@parallels.com> wrote:
>>>
>>> If a task is queued but not running on it rq, we can simply migrate
>>> it without migration thread and switching of context.
>>>
>>> Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
>>> ---
>>>  kernel/sched/core.c |   47 ++++++++++++++++++++++++++++++++---------------
>>>  1 file changed, 32 insertions(+), 15 deletions(-)
>>>
>>> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
>>> index d4399b4..dbbba26 100644
>>> --- a/kernel/sched/core.c
>>> +++ b/kernel/sched/core.c
>>> @@ -4594,6 +4594,33 @@ void init_idle(struct task_struct *idle, int cpu)
>>>  }
>>>
>>>  #ifdef CONFIG_SMP
>>> +/*
>>> + * move_queued_task - move a queued task to new rq.
>>> + *
>>> + * Returns (locked) new rq. Old rq's lock is released.
>>> + */
>>> +static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
>>> +{
>>> +       struct rq *rq = task_rq(p);
>>> +
>>> +       lockdep_assert_held(&rq->lock);
>>> +
>>> +       dequeue_task(rq, p, 0);
>>> +       p->on_rq = TASK_ON_RQ_MIGRATING;
>>> +       set_task_cpu(p, new_cpu);
>>> +       raw_spin_unlock(&rq->lock);
>>> +
>>> +       rq = cpu_rq(new_cpu);
>>> +
>>> +       raw_spin_lock(&rq->lock);
>>> +       BUG_ON(task_cpu(p) != new_cpu);
>>> +       p->on_rq = TASK_ON_RQ_QUEUED;
>>> +       enqueue_task(rq, p, 0);
>>> +       check_preempt_curr(rq, p, 0);
>>> +
>>> +       return rq;
>>> +}
>>> +
>>>  void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
>>>  {
>>>         if (p->sched_class && p->sched_class->set_cpus_allowed)
>>> @@ -4650,14 +4677,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
>>>                 goto out;
>>>
>>>         dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
>>> -       if (task_on_rq_queued(p) || p->state == TASK_WAKING) {
>>> +       if (task_running(rq, p) || p->state == TASK_WAKING) {
>>>                 struct migration_arg arg = { p, dest_cpu };
>>>                 /* Need help from migration thread: drop lock and wait. */
>>>                 task_rq_unlock(rq, p, &flags);
>>>                 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
>>>                 tlb_migrate_finish(p->mm);
>>>                 return 0;
>>> -       }
>>> +       } else if (task_on_rq_queued(p))
>>> +               rq = move_queued_task(p, dest_cpu);
>>>  out:
>>>         task_rq_unlock(rq, p, &flags);
>>>
>>> @@ -4700,19 +4728,8 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
>>>          * If we're not on a rq, the next wake-up will ensure we're
>>>          * placed properly.
>>>          */
>>> -       if (task_on_rq_queued(p)) {
>>> -               dequeue_task(rq, p, 0);
>>> -               p->on_rq = TASK_ON_RQ_MIGRATING;
>>> -               set_task_cpu(p, dest_cpu);
>>> -               raw_spin_unlock(&rq->lock);
>>> -
>>> -               rq = cpu_rq(dest_cpu);
>>> -               raw_spin_lock(&rq->lock);
>>> -               BUG_ON(task_rq(p) != rq);
>>> -               p->on_rq = TASK_ON_RQ_QUEUED;
>>> -               enqueue_task(rq, p, 0);
>>> -               check_preempt_curr(rq, p, 0);
>>> -       }
>>> +       if (task_on_rq_queued(p))
>>> +               rq = move_queued_task(p, dest_cpu);
>>>  done:
>>>         ret = 1;
>>>  fail:
>>>
>>>
>>>
>>> --
>>> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
>>> the body of a message to majordomo@vger.kernel.org
>>> More majordomo info at  http://vger.kernel.org/majordomo-info.html
>>> Please read the FAQ at  http://www.tux.org/lkml/
> 
> 


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [tip:sched/core] sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running
  2014-09-12 11:03 [PATCH] sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running Kirill Tkhai
  2014-09-14  9:13 ` Peter Zijlstra
  2014-09-15  9:48 ` Preeti Murthy
@ 2014-09-19 11:46 ` tip-bot for Kirill Tkhai
  2 siblings, 0 replies; 10+ messages in thread
From: tip-bot for Kirill Tkhai @ 2014-09-19 11:46 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, ktkhai, hpa, mingo, torvalds, peterz, tglx

Commit-ID:  a15b12ac36ad4e7b856a4ae54937ae26a51aebad
Gitweb:     http://git.kernel.org/tip/a15b12ac36ad4e7b856a4ae54937ae26a51aebad
Author:     Kirill Tkhai <ktkhai@parallels.com>
AuthorDate: Fri, 12 Sep 2014 15:03:34 +0400
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Fri, 19 Sep 2014 12:35:21 +0200

sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running

If a task is queued but not running on its rq, we can simply migrate
it without the migration thread and without switching of context.

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1410519814.3569.7.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/sched/core.c | 47 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 32 insertions(+), 15 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5536397..4b1ddeb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4629,6 +4629,33 @@ void init_idle(struct task_struct *idle, int cpu)
 }
 
 #ifdef CONFIG_SMP
+/*
+ * move_queued_task - move a queued task to new rq.
+ *
+ * Returns (locked) new rq. Old rq's lock is released.
+ */
+static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
+{
+	struct rq *rq = task_rq(p);
+
+	lockdep_assert_held(&rq->lock);
+
+	dequeue_task(rq, p, 0);
+	p->on_rq = TASK_ON_RQ_MIGRATING;
+	set_task_cpu(p, new_cpu);
+	raw_spin_unlock(&rq->lock);
+
+	rq = cpu_rq(new_cpu);
+
+	raw_spin_lock(&rq->lock);
+	BUG_ON(task_cpu(p) != new_cpu);
+	p->on_rq = TASK_ON_RQ_QUEUED;
+	enqueue_task(rq, p, 0);
+	check_preempt_curr(rq, p, 0);
+
+	return rq;
+}
+
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
 	if (p->sched_class && p->sched_class->set_cpus_allowed)
@@ -4685,14 +4712,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (task_on_rq_queued(p) || p->state == TASK_WAKING) {
+	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, p, &flags);
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 		tlb_migrate_finish(p->mm);
 		return 0;
-	}
+	} else if (task_on_rq_queued(p))
+		rq = move_queued_task(p, dest_cpu);
 out:
 	task_rq_unlock(rq, p, &flags);
 
@@ -4735,19 +4763,8 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * If we're not on a rq, the next wake-up will ensure we're
 	 * placed properly.
 	 */
-	if (task_on_rq_queued(p)) {
-		dequeue_task(rq, p, 0);
-		p->on_rq = TASK_ON_RQ_MIGRATING;
-		set_task_cpu(p, dest_cpu);
-		raw_spin_unlock(&rq->lock);
-
-		rq = cpu_rq(dest_cpu);
-		raw_spin_lock(&rq->lock);
-		BUG_ON(task_rq(p) != rq);
-		p->on_rq = TASK_ON_RQ_QUEUED;
-		enqueue_task(rq, p, 0);
-		check_preempt_curr(rq, p, 0);
-	}
+	if (task_on_rq_queued(p))
+		rq = move_queued_task(p, dest_cpu);
 done:
 	ret = 1;
 fail:

^ permalink raw reply related	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2014-09-19 11:47 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-09-12 11:03 [PATCH] sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running Kirill Tkhai
2014-09-14  9:13 ` Peter Zijlstra
2014-09-14  9:20   ` Kirill Tkhai
2014-09-14  9:55     ` Peter Zijlstra
2014-09-14  9:57       ` Kirill Tkhai
2014-09-15  9:48 ` Preeti Murthy
2014-09-15 10:02   ` Kirill Tkhai
2014-09-15 11:37     ` Peter Zijlstra
2014-09-16 11:29     ` Preeti U Murthy
2014-09-19 11:46 ` [tip:sched/core] " tip-bot for Kirill Tkhai

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.