linux-kernel.vger.kernel.org archive mirror
* [PATCH 1/1] kthread: break dependency between worker->lock and task_struct->pi_lock
@ 2020-04-27 18:43 Suren Baghdasaryan
  2020-04-28 16:31 ` Peter Zijlstra
From: Suren Baghdasaryan @ 2020-04-27 18:43 UTC
  To: surenb
  Cc: peterz, mingo, hannes, will, akpm, tglx, ben.dooks, cl, ke.wang,
	shakeelb, linux-kernel, kernel-team

A number of kthread-related functions indirectly take task_struct->pi_lock
while holding worker->lock, via a call chain like this:
    spin_lock(&worker->lock)
    kthread_insert_work
    wake_up_process
    try_to_wake_up
    raw_spin_lock_irqsave(&p->pi_lock, flags)

This lock dependency exists whenever kthread_insert_work is called either
directly or indirectly via __kthread_queue_delayed_work in the following
functions:
    kthread_queue_work
    kthread_delayed_work_timer_fn
    kthread_queue_delayed_work
    kthread_flush_work
    kthread_mod_delayed_work

This creates possibilities for circular dependencies like the one reported
at: https://lkml.org/lkml/2020/4/24/954
Break this lock dependency by moving task wakeup after worker->lock has
been released.
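
The resulting pattern, common to all five functions listed above, looks
roughly like this (a simplified composite of the diff below; error paths
omitted):

    DEFINE_WAKE_Q(wake_q);
    unsigned long flags;

    raw_spin_lock_irqsave(&worker->lock, flags);
    /* record the task to wake instead of waking it under the lock */
    kthread_insert_work(worker, work, pos, &wake_q);
    raw_spin_unlock_irqrestore(&worker->lock, flags);

    /* wake the worker task only after worker->lock is released */
    wake_up_q(&wake_q);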

Reported-by: Ke Wang <ke.wang@unisoc.com>
Reported-by: Shakeel Butt <shakeelb@google.com>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Tested-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 kernel/kthread.c | 44 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 33 insertions(+), 11 deletions(-)

diff --git a/kernel/kthread.c b/kernel/kthread.c
index bfbfa481be3a..7a93654072bb 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -9,6 +9,7 @@
 #include <uapi/linux/sched/types.h>
 #include <linux/sched.h>
 #include <linux/sched/task.h>
+#include <linux/sched/wake_q.h>
 #include <linux/kthread.h>
 #include <linux/completion.h>
 #include <linux/err.h>
@@ -806,14 +807,15 @@ static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
 /* insert @work before @pos in @worker */
 static void kthread_insert_work(struct kthread_worker *worker,
 				struct kthread_work *work,
-				struct list_head *pos)
+				struct list_head *pos,
+				struct wake_q_head *wake_q)
 {
 	kthread_insert_work_sanity_check(worker, work);
 
 	list_add_tail(&work->node, pos);
 	work->worker = worker;
 	if (!worker->current_work && likely(worker->task))
-		wake_up_process(worker->task);
+		wake_q_add(wake_q, worker->task);
 }
 
 /**
@@ -831,15 +833,19 @@ static void kthread_insert_work(struct kthread_worker *worker,
 bool kthread_queue_work(struct kthread_worker *worker,
 			struct kthread_work *work)
 {
-	bool ret = false;
+	DEFINE_WAKE_Q(wake_q);
 	unsigned long flags;
+	bool ret = false;
 
 	raw_spin_lock_irqsave(&worker->lock, flags);
 	if (!queuing_blocked(worker, work)) {
-		kthread_insert_work(worker, work, &worker->work_list);
+		kthread_insert_work(worker, work, &worker->work_list, &wake_q);
 		ret = true;
 	}
 	raw_spin_unlock_irqrestore(&worker->lock, flags);
+
+	wake_up_q(&wake_q);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_work);
@@ -857,6 +863,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
 	struct kthread_work *work = &dwork->work;
 	struct kthread_worker *worker = work->worker;
+	DEFINE_WAKE_Q(wake_q);
 	unsigned long flags;
 
 	/*
@@ -873,15 +880,18 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	/* Move the work from worker->delayed_work_list. */
 	WARN_ON_ONCE(list_empty(&work->node));
 	list_del_init(&work->node);
-	kthread_insert_work(worker, work, &worker->work_list);
+	kthread_insert_work(worker, work, &worker->work_list, &wake_q);
 
 	raw_spin_unlock_irqrestore(&worker->lock, flags);
+
+	wake_up_q(&wake_q);
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 
 static void __kthread_queue_delayed_work(struct kthread_worker *worker,
 					 struct kthread_delayed_work *dwork,
-					 unsigned long delay)
+					 unsigned long delay,
+					 struct wake_q_head *wake_q)
 {
 	struct timer_list *timer = &dwork->timer;
 	struct kthread_work *work = &dwork->work;
@@ -895,7 +905,7 @@ static void __kthread_queue_delayed_work(struct kthread_worker *worker,
 	 * on that there's no such delay when @delay is 0.
 	 */
 	if (!delay) {
-		kthread_insert_work(worker, work, &worker->work_list);
+		kthread_insert_work(worker, work, &worker->work_list, wake_q);
 		return;
 	}
 
@@ -928,17 +938,21 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
 				unsigned long delay)
 {
 	struct kthread_work *work = &dwork->work;
+	DEFINE_WAKE_Q(wake_q);
 	unsigned long flags;
 	bool ret = false;
 
 	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	if (!queuing_blocked(worker, work)) {
-		__kthread_queue_delayed_work(worker, dwork, delay);
+		__kthread_queue_delayed_work(worker, dwork, delay, &wake_q);
 		ret = true;
 	}
 
 	raw_spin_unlock_irqrestore(&worker->lock, flags);
+
+	wake_up_q(&wake_q);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
@@ -967,6 +981,7 @@ void kthread_flush_work(struct kthread_work *work)
 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
 	};
+	DEFINE_WAKE_Q(wake_q);
 	struct kthread_worker *worker;
 	bool noop = false;
 
@@ -979,15 +994,18 @@ void kthread_flush_work(struct kthread_work *work)
 	WARN_ON_ONCE(work->worker != worker);
 
 	if (!list_empty(&work->node))
-		kthread_insert_work(worker, &fwork.work, work->node.next);
+		kthread_insert_work(worker, &fwork.work, work->node.next,
+				    &wake_q);
 	else if (worker->current_work == work)
 		kthread_insert_work(worker, &fwork.work,
-				    worker->work_list.next);
+				    worker->work_list.next, &wake_q);
 	else
 		noop = true;
 
 	raw_spin_unlock_irq(&worker->lock);
 
+	wake_up_q(&wake_q);
+
 	if (!noop)
 		wait_for_completion(&fwork.done);
 }
@@ -1065,6 +1083,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 			      unsigned long delay)
 {
 	struct kthread_work *work = &dwork->work;
+	DEFINE_WAKE_Q(wake_q);
 	unsigned long flags;
 	int ret = false;
 
@@ -1083,9 +1102,12 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 
 	ret = __kthread_cancel_work(work, true, &flags);
 fast_queue:
-	__kthread_queue_delayed_work(worker, dwork, delay);
+	__kthread_queue_delayed_work(worker, dwork, delay, &wake_q);
 out:
 	raw_spin_unlock_irqrestore(&worker->lock, flags);
+
+	wake_up_q(&wake_q);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
-- 
2.26.2.303.gf8c07b1a785-goog



* Re: [PATCH 1/1] kthread: break dependency between worker->lock and task_struct->pi_lock
  2020-04-27 18:43 [PATCH 1/1] kthread: break dependency between worker->lock and task_struct->pi_lock Suren Baghdasaryan
@ 2020-04-28 16:31 ` Peter Zijlstra
  2020-04-28 18:04   ` Suren Baghdasaryan
From: Peter Zijlstra @ 2020-04-28 16:31 UTC
  To: Suren Baghdasaryan
  Cc: mingo, hannes, will, akpm, tglx, ben.dooks, cl, ke.wang,
	shakeelb, linux-kernel, kernel-team

On Mon, Apr 27, 2020 at 11:43:58AM -0700, Suren Baghdasaryan wrote:
> A number of kthread-related functions indirectly take task_struct->pi_lock
> while holding worker->lock, via a call chain like this:
>     spin_lock(&worker->lock)
>     kthread_insert_work
>     wake_up_process
>     try_to_wake_up
>     raw_spin_lock_irqsave(&p->pi_lock, flags)
> 
> This lock dependency exists whenever kthread_insert_work is called either
> directly or indirectly via __kthread_queue_delayed_work in the following
> functions:
>     kthread_queue_work
>     kthread_delayed_work_timer_fn
>     kthread_queue_delayed_work
>     kthread_flush_work
>     kthread_mod_delayed_work
> 
> This creates possibilities for circular dependencies like the one reported
> at: https://lkml.org/lkml/2020/4/24/954

Please, do not use lkml.org links.

Also, ideally, we'd pull that kthread_queue_delayed_work() out from
under rq->lock.

In fact, looking at it, WTH is the delayed branch of
kthread_queue_delayed_work() under that lock? That whole
delayed_work_list thing smells like bong-hits.


* Re: [PATCH 1/1] kthread: break dependency between worker->lock and task_struct->pi_lock
  2020-04-28 16:31 ` Peter Zijlstra
@ 2020-04-28 18:04   ` Suren Baghdasaryan
  2020-04-30 17:57     ` Suren Baghdasaryan
From: Suren Baghdasaryan @ 2020-04-28 18:04 UTC
  To: Peter Zijlstra
  Cc: Ingo Molnar, Johannes Weiner, will, Andrew Morton,
	Thomas Gleixner, ben.dooks, cl, ke.wang, Shakeel Butt, LKML,
	kernel-team

On Tue, Apr 28, 2020 at 9:31 AM Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Mon, Apr 27, 2020 at 11:43:58AM -0700, Suren Baghdasaryan wrote:
> > A number of kthread-related functions indirectly take task_struct->pi_lock
> > while holding worker->lock, via a call chain like this:
> >     spin_lock(&worker->lock)
> >     kthread_insert_work
> >     wake_up_process
> >     try_to_wake_up
> >     raw_spin_lock_irqsave(&p->pi_lock, flags)
> >
> > This lock dependency exists whenever kthread_insert_work is called either
> > directly or indirectly via __kthread_queue_delayed_work in the following
> > functions:
> >     kthread_queue_work
> >     kthread_delayed_work_timer_fn
> >     kthread_queue_delayed_work
> >     kthread_flush_work
> >     kthread_mod_delayed_work
> >
> > This creates possibilities for circular dependencies like the one reported
> > at: https://lkml.org/lkml/2020/4/24/954
>
> Please, do not use lkml.org links.

Thanks for the review! Would
https://lore.kernel.org/lkml/CAJuCfpG4NkhpQvZjgXZ_3gm6Hf1QgN_eUOQ8iX9Cv1k9whLwSQ@mail.gmail.com
be better, or should I just add the body of that report here? Or should
I not mention it at all?

>
> Also, ideally, we'd pull that kthread_queue_delayed_work() out from
> under rq->lock.

I understand but I don't see an easy way to do that. We need to start
PSI polling whenever a monitored PSI state changes:
https://elixir.bootlin.com/linux/v5.6.7/source/kernel/sched/psi.c#L783.
This is happening under rq->lock because PSI accounting is done from
inside enqueue_task/dequeue_task - the call chain is:

enqueue_task > psi_enqueue > psi_task_change > psi_group_change >
psi_schedule_poll_work > kthread_queue_delayed_work

IIUC enqueue_task/dequeue_task are called with rq->lock taken, so
moving kthread_queue_delayed_work out is not trivial.
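
For reference, the trigger point looks roughly like this (a simplified
sketch of psi_group_change() from the v5.6 psi.c linked above):

    /* inside psi_group_change(), called with rq->lock already held */
    if (state_mask & group->poll_states)
        psi_schedule_poll_work(group, 1);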

>
> In fact, looking at it, WTH is the delayed branch of
> kthread_queue_delayed_work() under that lock? That whole
> delayed_work_list thing smells like bong-hits.

I have the poll_scheduled atomic specifically to ensure that
kthread_queue_delayed_work does not block as commented here:
https://elixir.bootlin.com/linux/v5.7-rc3/source/kernel/sched/psi.c#L551.
I understand this is not ideal. If there is a better way to schedule
that kworker while ensuring it does not block I would be happy to
rework this. Any suggestions?
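
A rough sketch of that guard (simplified from psi_schedule_poll_work()
at the link above; the real code also guards against kworker teardown
via RCU):

    /* if polling work is already scheduled, bail out without blocking */
    if (atomic_cmpxchg(&group->poll_scheduled, 0, 1) != 0)
        return;
    kthread_queue_delayed_work(kworker, &group->poll_work, delay);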


* Re: [PATCH 1/1] kthread: break dependency between worker->lock and task_struct->pi_lock
  2020-04-28 18:04   ` Suren Baghdasaryan
@ 2020-04-30 17:57     ` Suren Baghdasaryan
From: Suren Baghdasaryan @ 2020-04-30 17:57 UTC
  To: Peter Zijlstra
  Cc: Ingo Molnar, Johannes Weiner, will, Andrew Morton,
	Thomas Gleixner, ben.dooks, cl, ke.wang, Shakeel Butt, LKML,
	kernel-team

On Tue, Apr 28, 2020 at 11:04 AM Suren Baghdasaryan <surenb@google.com> wrote:
>
> On Tue, Apr 28, 2020 at 9:31 AM Peter Zijlstra <peterz@infradead.org> wrote:
> >
> > On Mon, Apr 27, 2020 at 11:43:58AM -0700, Suren Baghdasaryan wrote:
> > > A number of kthread-related functions indirectly take task_struct->pi_lock
> > > while holding worker->lock, via a call chain like this:
> > >     spin_lock(&worker->lock)
> > >     kthread_insert_work
> > >     wake_up_process
> > >     try_to_wake_up
> > >     raw_spin_lock_irqsave(&p->pi_lock, flags)
> > >
> > > This lock dependency exists whenever kthread_insert_work is called either
> > > directly or indirectly via __kthread_queue_delayed_work in the following
> > > functions:
> > >     kthread_queue_work
> > >     kthread_delayed_work_timer_fn
> > >     kthread_queue_delayed_work
> > >     kthread_flush_work
> > >     kthread_mod_delayed_work
> > >
> > > This creates possibilities for circular dependencies like the one reported
> > > at: https://lkml.org/lkml/2020/4/24/954
> >
> > Please, do not use lkml.org links.
>
> Thanks for the review! Would
> https://lore.kernel.org/lkml/CAJuCfpG4NkhpQvZjgXZ_3gm6Hf1QgN_eUOQ8iX9Cv1k9whLwSQ@mail.gmail.com
> be better, or should I just add the body of that report here? Or should
> I not mention it at all?

Sorry, this time in plain text mode...
If there are no more comments on this patch I'll post a v2 with
lore.kernel.org instead of lkml.org link. Please let me know if there
are more issues that you would like to be addressed.
IMHO, taking kthread_queue_delayed_work() out from under rq->lock (if
we can figure out how to do that cleanly) can be a separate patch, and
this one is still useful regardless.
Thanks!

>
> >
> > Also, ideally, we'd pull that kthread_queue_delayed_work() out from
> > under rq->lock.
>
> I understand but I don't see an easy way to do that. We need to start
> PSI polling whenever a monitored PSI state changes:
> https://elixir.bootlin.com/linux/v5.6.7/source/kernel/sched/psi.c#L783.
> This is happening under rq->lock because PSI accounting is done from
> inside enqueue_task/dequeue_task - the call chain is:
>
> enqueue_task > psi_enqueue > psi_task_change > psi_group_change >
> psi_schedule_poll_work > kthread_queue_delayed_work
>
> IIUC enqueue_task/dequeue_task are called with rq->lock taken, so
> moving kthread_queue_delayed_work out is not trivial.
>
> >
> > In fact, looking at it, WTH is the delayed branch of
> > kthread_queue_delayed_work() under that lock? That whole
> > delayed_work_list thing smells like bong-hits.
>
> I have the poll_scheduled atomic specifically to ensure that
> kthread_queue_delayed_work does not block as commented here:
> https://elixir.bootlin.com/linux/v5.7-rc3/source/kernel/sched/psi.c#L551.
> I understand this is not ideal. If there is a better way to schedule
> that kworker while ensuring it does not block I would be happy to
> rework this. Any suggestions?