* [PATCH 1/2] kthread: convert worker lock to raw spinlock
@ 2019-02-12 16:25 Sebastian Andrzej Siewior
  2019-02-12 16:25 ` [PATCH 2/2] kthread: Do not use TIMER_IRQSAFE Sebastian Andrzej Siewior
                   ` (2 more replies)
  0 siblings, 3 replies; 10+ messages in thread
From: Sebastian Andrzej Siewior @ 2019-02-12 16:25 UTC (permalink / raw)
  To: linux-kernel
  Cc: Ingo Molnar, tglx, Julia Cartwright, Sebastian Andrzej Siewior,
	Guenter Roeck, Steffen Trumtrar, Tim Sander

From: Julia Cartwright <julia@ni.com>

In order to enable the queuing of kthread work items from hardirq
context even when PREEMPT_RT_FULL is enabled, convert the worker
spin_lock to a raw_spin_lock.

This is only acceptable to do because the work performed under the lock
is well-bounded and minimal.
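
For illustration (not part of the change itself), a minimal sketch of
the call pattern this enables; the worker, work item and handler names
are hypothetical, only the kthread_*() calls are real API:

  static struct kthread_worker my_worker;	/* hypothetical */
  static struct kthread_work my_work;		/* hypothetical */

  static irqreturn_t my_irq_handler(int irq, void *dev_id)
  {
  	/*
  	 * With worker->lock converted to a raw_spinlock_t this is valid
  	 * from hardirq context even on PREEMPT_RT_FULL; the lock only
  	 * covers a short list operation plus a wake-up.
  	 */
  	kthread_queue_work(&my_worker, &my_work);
  	return IRQ_HANDLED;
  }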

Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Guenter Roeck <linux@roeck-us.net>
Reported-and-tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reported-by: Tim Sander <tim@krieglstein.org>
Signed-off-by: Julia Cartwright <julia@ni.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/kthread.h |  4 ++--
 kernel/kthread.c        | 42 ++++++++++++++++++++---------------------
 2 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c1961761311db..6b8c064f0cbcd 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -85,7 +85,7 @@ enum {
 
 struct kthread_worker {
 	unsigned int		flags;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	struct list_head	work_list;
 	struct list_head	delayed_work_list;
 	struct task_struct	*task;
@@ -106,7 +106,7 @@ struct kthread_delayed_work {
 };
 
 #define KTHREAD_WORKER_INIT(worker)	{				\
-	.lock = __SPIN_LOCK_UNLOCKED((worker).lock),			\
+	.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock),		\
 	.work_list = LIST_HEAD_INIT((worker).work_list),		\
 	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
 	}
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 087d18d771b53..5641b55783a6f 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -599,7 +599,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
 				struct lock_class_key *key)
 {
 	memset(worker, 0, sizeof(struct kthread_worker));
-	spin_lock_init(&worker->lock);
+	raw_spin_lock_init(&worker->lock);
 	lockdep_set_class_and_name(&worker->lock, key, name);
 	INIT_LIST_HEAD(&worker->work_list);
 	INIT_LIST_HEAD(&worker->delayed_work_list);
@@ -641,21 +641,21 @@ int kthread_worker_fn(void *worker_ptr)
 
 	if (kthread_should_stop()) {
 		__set_current_state(TASK_RUNNING);
-		spin_lock_irq(&worker->lock);
+		raw_spin_lock_irq(&worker->lock);
 		worker->task = NULL;
-		spin_unlock_irq(&worker->lock);
+		raw_spin_unlock_irq(&worker->lock);
 		return 0;
 	}
 
 	work = NULL;
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	if (!list_empty(&worker->work_list)) {
 		work = list_first_entry(&worker->work_list,
 					struct kthread_work, node);
 		list_del_init(&work->node);
 	}
 	worker->current_work = work;
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);
 
 	if (work) {
 		__set_current_state(TASK_RUNNING);
@@ -812,12 +812,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
 	bool ret = false;
 	unsigned long flags;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	if (!queuing_blocked(worker, work)) {
 		kthread_insert_work(worker, work, &worker->work_list);
 		ret = true;
 	}
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_work);
@@ -843,7 +843,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	if (WARN_ON_ONCE(!worker))
 		return;
 
-	spin_lock(&worker->lock);
+	raw_spin_lock(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -852,7 +852,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	list_del_init(&work->node);
 	kthread_insert_work(worker, work, &worker->work_list);
 
-	spin_unlock(&worker->lock);
+	raw_spin_unlock(&worker->lock);
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 
@@ -908,14 +908,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	bool ret = false;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	if (!queuing_blocked(worker, work)) {
 		__kthread_queue_delayed_work(worker, dwork, delay);
 		ret = true;
 	}
 
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
@@ -951,7 +951,7 @@ void kthread_flush_work(struct kthread_work *work)
 	if (!worker)
 		return;
 
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -963,7 +963,7 @@ void kthread_flush_work(struct kthread_work *work)
 	else
 		noop = true;
 
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);
 
 	if (!noop)
 		wait_for_completion(&fwork.done);
@@ -996,9 +996,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
 		 * any queuing is blocked by setting the canceling counter.
 		 */
 		work->canceling++;
-		spin_unlock_irqrestore(&worker->lock, *flags);
+		raw_spin_unlock_irqrestore(&worker->lock, *flags);
 		del_timer_sync(&dwork->timer);
-		spin_lock_irqsave(&worker->lock, *flags);
+		raw_spin_lock_irqsave(&worker->lock, *flags);
 		work->canceling--;
 	}
 
@@ -1045,7 +1045,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	int ret = false;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	/* Do not bother with canceling when never queued. */
 	if (!work->worker)
@@ -1062,7 +1062,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 fast_queue:
 	__kthread_queue_delayed_work(worker, dwork, delay);
 out:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
@@ -1076,7 +1076,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	if (!worker)
 		goto out;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -1090,13 +1090,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	 * In the meantime, block any queuing by setting the canceling counter.
 	 */
 	work->canceling++;
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	kthread_flush_work(work);
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	work->canceling--;
 
 out_fast:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 out:
 	return ret;
 }
-- 
2.20.1



* [PATCH 2/2] kthread: Do not use TIMER_IRQSAFE
  2019-02-12 16:25 [PATCH 1/2] kthread: convert worker lock to raw spinlock Sebastian Andrzej Siewior
@ 2019-02-12 16:25 ` Sebastian Andrzej Siewior
  2019-02-13 12:02   ` Petr Mladek
  2019-02-28 10:22   ` [tip:sched/core] " tip-bot for Sebastian Andrzej Siewior
  2019-02-13 12:13 ` [PATCH 1/2] kthread: convert worker lock to raw spinlock Petr Mladek
  2019-02-28 10:22 ` [tip:sched/core] kthread: Convert " tip-bot for Julia Cartwright
  2 siblings, 2 replies; 10+ messages in thread
From: Sebastian Andrzej Siewior @ 2019-02-12 16:25 UTC (permalink / raw)
  To: linux-kernel; +Cc: Ingo Molnar, tglx, Sebastian Andrzej Siewior, Petr Mladek

The TIMER_IRQSAFE flag was introduced in commit

  22597dc3d97b1 ("kthread: initial support for delayed kthread work")

which modelled the delayed kthread code after the workqueue code. The
workqueue code requires TIMER_IRQSAFE for synchronisation purposes.
This is not true for kthread's delayed work timer, since all
operations occur under the worker lock.

Remove TIMER_IRQSAFE from the timer initialisation and use
timer_setup(), the official initialisation function.
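
For illustration (not part of the patch), a hypothetical user of the
delayed kthread work API; the user-visible initialisation call is
unchanged, only the macro's expansion differs:

  static struct kthread_worker my_worker;		/* hypothetical */
  static struct kthread_delayed_work my_dwork;	/* hypothetical */

  static void my_work_fn(struct kthread_work *work)
  {
  	/* runs in the kthread worker, never in hardirq context */
  }

  	/* somewhere during driver init (illustrative) */
  	kthread_init_delayed_work(&my_dwork, my_work_fn);
  	kthread_queue_delayed_work(&my_worker, &my_dwork,
  				   msecs_to_jiffies(10));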

Cc: Petr Mladek <pmladek@suse.com>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/kthread.h | 5 ++---
 kernel/kthread.c        | 5 +++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 6b8c064f0cbcd..3d9d834c66a25 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -164,9 +164,8 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
 #define kthread_init_delayed_work(dwork, fn)				\
 	do {								\
 		kthread_init_work(&(dwork)->work, (fn));		\
-		__init_timer(&(dwork)->timer,				\
-			     kthread_delayed_work_timer_fn,		\
-			     TIMER_IRQSAFE);				\
+		timer_setup(&(dwork)->timer,				\
+			     kthread_delayed_work_timer_fn, 0);		\
 	} while (0)
 
 int kthread_worker_fn(void *worker_ptr);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5641b55783a6f..5373355412672 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -835,6 +835,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
 	struct kthread_work *work = &dwork->work;
 	struct kthread_worker *worker = work->worker;
+	unsigned long flags;
 
 	/*
 	 * This might happen when a pending work is reinitialized.
@@ -843,7 +844,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	if (WARN_ON_ONCE(!worker))
 		return;
 
-	raw_spin_lock(&worker->lock);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -852,7 +853,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	list_del_init(&work->node);
 	kthread_insert_work(worker, work, &worker->work_list);
 
-	raw_spin_unlock(&worker->lock);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 
-- 
2.20.1



* Re: [PATCH 2/2] kthread: Do not use TIMER_IRQSAFE
  2019-02-12 16:25 ` [PATCH 2/2] kthread: Do not use TIMER_IRQSAFE Sebastian Andrzej Siewior
@ 2019-02-13 12:02   ` Petr Mladek
  2019-02-28 10:22   ` [tip:sched/core] " tip-bot for Sebastian Andrzej Siewior
  1 sibling, 0 replies; 10+ messages in thread
From: Petr Mladek @ 2019-02-13 12:02 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior; +Cc: linux-kernel, Ingo Molnar, tglx

On Tue 2019-02-12 17:25:54, Sebastian Andrzej Siewior wrote:
> The TIMER_IRQSAFE was introduced in commit
> 
>   22597dc3d97b1 ("kthread: initial support for delayed kthread work")
> 
> which modelled the delayed kthread code after workqueue's code. The
> workqueue code requires the flag TIMER_IRQSAFE for synchronisation
> purpose. This is not true for kthread's delay timer since all
> operations occur under a lock.

Great catch! The original proposal used lockless code. We missed
that the IRQ-safe timer was no longer needed with the spin lock.

> Remove TIMER_IRQSAFE from the timer initialisation.
> Use timer_setup() for initialisation purpose which is the official
> function.
> 
> Cc: Petr Mladek <pmladek@suse.com>
> Cc: Ingo Molnar <mingo@kernel.org>
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

Reviewed-by: Petr Mladek <pmladek@suse.com>

Best Regards,
Petr


* Re: [PATCH 1/2] kthread: convert worker lock to raw spinlock
  2019-02-12 16:25 [PATCH 1/2] kthread: convert worker lock to raw spinlock Sebastian Andrzej Siewior
  2019-02-12 16:25 ` [PATCH 2/2] kthread: Do not use TIMER_IRQSAFE Sebastian Andrzej Siewior
@ 2019-02-13 12:13 ` Petr Mladek
  2019-02-28 10:22 ` [tip:sched/core] kthread: Convert " tip-bot for Julia Cartwright
  2 siblings, 0 replies; 10+ messages in thread
From: Petr Mladek @ 2019-02-13 12:13 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior
  Cc: linux-kernel, Ingo Molnar, tglx, Julia Cartwright, Guenter Roeck,
	Steffen Trumtrar, Tim Sander

On Tue 2019-02-12 17:25:53, Sebastian Andrzej Siewior wrote:
> From: Julia Cartwright <julia@ni.com>
> 
> In order to enable the queuing of kthread work items from hardirq
> context even when PREEMPT_RT_FULL is enabled, convert the worker
> spin_lock to a raw_spin_lock.
> 
> This is only acceptable to do because the work performed under the lock
> is well-bounded and minimal.

I can confirm that it is well-bounded and minimal. The most
expensive function is probably add_timer(), called from
__kthread_queue_delayed_work(). It might spin a bit to acquire
timer->base->lock.
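
For reference, a condensed (paraphrased, not verbatim) sketch of that
path in kernel/kthread.c -- everything done under worker->lock is list
manipulation plus the single add_timer() call:

  /* Paraphrased sketch; sanity checks and comments elided. */
  static void __kthread_queue_delayed_work(struct kthread_worker *worker,
  					 struct kthread_delayed_work *dwork,
  					 unsigned long delay)
  {
  	if (!delay) {
  		kthread_insert_work(worker, &dwork->work, &worker->work_list);
  		return;
  	}
  	list_add(&dwork->work.node, &worker->delayed_work_list);
  	dwork->work.worker = worker;
  	dwork->timer.expires = jiffies + delay;
  	add_timer(&dwork->timer);	/* may spin on timer->base->lock */
  }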

> Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> Cc: Guenter Roeck <linux@roeck-us.net>
> Reported-and-tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
> Reported-by: Tim Sander <tim@krieglstein.org>
> Signed-off-by: Julia Cartwright <julia@ni.com>
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

Reviewed-by: Petr Mladek <pmladek@suse.com>

Best Regards,
Petr


* [tip:sched/core] kthread: Convert worker lock to raw spinlock
  2019-02-12 16:25 [PATCH 1/2] kthread: convert worker lock to raw spinlock Sebastian Andrzej Siewior
  2019-02-12 16:25 ` [PATCH 2/2] kthread: Do not use TIMER_IRQSAFE Sebastian Andrzej Siewior
  2019-02-13 12:13 ` [PATCH 1/2] kthread: convert worker lock to raw spinlock Petr Mladek
@ 2019-02-28 10:22 ` tip-bot for Julia Cartwright
  2 siblings, 0 replies; 10+ messages in thread
From: tip-bot for Julia Cartwright @ 2019-02-28 10:22 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: s.trumtrar, tglx, linux, tim, hpa, mingo, bigeasy, linux-kernel,
	pmladek, julia

Commit-ID:  fe99a4f4d6022ec92f9b52a5528cb9b77513e7d1
Gitweb:     https://git.kernel.org/tip/fe99a4f4d6022ec92f9b52a5528cb9b77513e7d1
Author:     Julia Cartwright <julia@ni.com>
AuthorDate: Tue, 12 Feb 2019 17:25:53 +0100
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 28 Feb 2019 11:18:38 +0100

kthread: Convert worker lock to raw spinlock

In order to enable the queuing of kthread work items from hardirq context
even when PREEMPT_RT_FULL is enabled, convert the worker spin_lock to a
raw_spin_lock.

This is only acceptable to do because the work performed under the lock is
well-bounded and minimal.

Reported-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reported-by: Tim Sander <tim@krieglstein.org>
Signed-off-by: Julia Cartwright <julia@ni.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Guenter Roeck <linux@roeck-us.net>
Link: https://lkml.kernel.org/r/20190212162554.19779-1-bigeasy@linutronix.de

---
 include/linux/kthread.h |  4 ++--
 kernel/kthread.c        | 42 +++++++++++++++++++++---------------------
 2 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c1961761311d..6b8c064f0cbc 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -85,7 +85,7 @@ enum {
 
 struct kthread_worker {
 	unsigned int		flags;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	struct list_head	work_list;
 	struct list_head	delayed_work_list;
 	struct task_struct	*task;
@@ -106,7 +106,7 @@ struct kthread_delayed_work {
 };
 
 #define KTHREAD_WORKER_INIT(worker)	{				\
-	.lock = __SPIN_LOCK_UNLOCKED((worker).lock),			\
+	.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock),		\
 	.work_list = LIST_HEAD_INIT((worker).work_list),		\
 	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
 	}
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 087d18d771b5..5641b55783a6 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -599,7 +599,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
 				struct lock_class_key *key)
 {
 	memset(worker, 0, sizeof(struct kthread_worker));
-	spin_lock_init(&worker->lock);
+	raw_spin_lock_init(&worker->lock);
 	lockdep_set_class_and_name(&worker->lock, key, name);
 	INIT_LIST_HEAD(&worker->work_list);
 	INIT_LIST_HEAD(&worker->delayed_work_list);
@@ -641,21 +641,21 @@ repeat:
 
 	if (kthread_should_stop()) {
 		__set_current_state(TASK_RUNNING);
-		spin_lock_irq(&worker->lock);
+		raw_spin_lock_irq(&worker->lock);
 		worker->task = NULL;
-		spin_unlock_irq(&worker->lock);
+		raw_spin_unlock_irq(&worker->lock);
 		return 0;
 	}
 
 	work = NULL;
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	if (!list_empty(&worker->work_list)) {
 		work = list_first_entry(&worker->work_list,
 					struct kthread_work, node);
 		list_del_init(&work->node);
 	}
 	worker->current_work = work;
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);
 
 	if (work) {
 		__set_current_state(TASK_RUNNING);
@@ -812,12 +812,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
 	bool ret = false;
 	unsigned long flags;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	if (!queuing_blocked(worker, work)) {
 		kthread_insert_work(worker, work, &worker->work_list);
 		ret = true;
 	}
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_work);
@@ -843,7 +843,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	if (WARN_ON_ONCE(!worker))
 		return;
 
-	spin_lock(&worker->lock);
+	raw_spin_lock(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -852,7 +852,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	list_del_init(&work->node);
 	kthread_insert_work(worker, work, &worker->work_list);
 
-	spin_unlock(&worker->lock);
+	raw_spin_unlock(&worker->lock);
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 
@@ -908,14 +908,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	bool ret = false;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	if (!queuing_blocked(worker, work)) {
 		__kthread_queue_delayed_work(worker, dwork, delay);
 		ret = true;
 	}
 
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
@@ -951,7 +951,7 @@ void kthread_flush_work(struct kthread_work *work)
 	if (!worker)
 		return;
 
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -963,7 +963,7 @@ void kthread_flush_work(struct kthread_work *work)
 	else
 		noop = true;
 
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);
 
 	if (!noop)
 		wait_for_completion(&fwork.done);
@@ -996,9 +996,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
 		 * any queuing is blocked by setting the canceling counter.
 		 */
 		work->canceling++;
-		spin_unlock_irqrestore(&worker->lock, *flags);
+		raw_spin_unlock_irqrestore(&worker->lock, *flags);
 		del_timer_sync(&dwork->timer);
-		spin_lock_irqsave(&worker->lock, *flags);
+		raw_spin_lock_irqsave(&worker->lock, *flags);
 		work->canceling--;
 	}
 
@@ -1045,7 +1045,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	int ret = false;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	/* Do not bother with canceling when never queued. */
 	if (!work->worker)
@@ -1062,7 +1062,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 fast_queue:
 	__kthread_queue_delayed_work(worker, dwork, delay);
 out:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
@@ -1076,7 +1076,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	if (!worker)
 		goto out;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -1090,13 +1090,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	 * In the meantime, block any queuing by setting the canceling counter.
 	 */
 	work->canceling++;
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	kthread_flush_work(work);
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	work->canceling--;
 
 out_fast:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 out:
 	return ret;
 }


* [tip:sched/core] kthread: Do not use TIMER_IRQSAFE
  2019-02-12 16:25 ` [PATCH 2/2] kthread: Do not use TIMER_IRQSAFE Sebastian Andrzej Siewior
  2019-02-13 12:02   ` Petr Mladek
@ 2019-02-28 10:22   ` tip-bot for Sebastian Andrzej Siewior
  1 sibling, 0 replies; 10+ messages in thread
From: tip-bot for Sebastian Andrzej Siewior @ 2019-02-28 10:22 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: hpa, bigeasy, linux-kernel, pmladek, tglx, mingo

Commit-ID:  ad01423aedaa7c6dd62d560b73a3cb39e6da3901
Gitweb:     https://git.kernel.org/tip/ad01423aedaa7c6dd62d560b73a3cb39e6da3901
Author:     Sebastian Andrzej Siewior <bigeasy@linutronix.de>
AuthorDate: Tue, 12 Feb 2019 17:25:54 +0100
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 28 Feb 2019 11:18:38 +0100

kthread: Do not use TIMER_IRQSAFE

The TIMER_IRQSAFE flag was introduced in commit 22597dc3d97b1 ("kthread:
initial support for delayed kthread work") which modelled the delayed
kthread code after the workqueue code. The workqueue code requires
TIMER_IRQSAFE for synchronisation purposes. This is not true for
kthread's delayed work timer, since all operations occur under the
worker lock.

Remove TIMER_IRQSAFE from the timer initialisation and use timer_setup(),
the official initialisation function.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Link: https://lkml.kernel.org/r/20190212162554.19779-2-bigeasy@linutronix.de

---
 include/linux/kthread.h | 5 ++---
 kernel/kthread.c        | 5 +++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 6b8c064f0cbc..3d9d834c66a2 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -164,9 +164,8 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
 #define kthread_init_delayed_work(dwork, fn)				\
 	do {								\
 		kthread_init_work(&(dwork)->work, (fn));		\
-		__init_timer(&(dwork)->timer,				\
-			     kthread_delayed_work_timer_fn,		\
-			     TIMER_IRQSAFE);				\
+		timer_setup(&(dwork)->timer,				\
+			     kthread_delayed_work_timer_fn, 0);		\
 	} while (0)
 
 int kthread_worker_fn(void *worker_ptr);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5641b55783a6..537335541267 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -835,6 +835,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
 	struct kthread_work *work = &dwork->work;
 	struct kthread_worker *worker = work->worker;
+	unsigned long flags;
 
 	/*
 	 * This might happen when a pending work is reinitialized.
@@ -843,7 +844,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	if (WARN_ON_ONCE(!worker))
 		return;
 
-	raw_spin_lock(&worker->lock);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -852,7 +853,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	list_del_init(&work->node);
 	kthread_insert_work(worker, work, &worker->work_list);
 
-	raw_spin_unlock(&worker->lock);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 


* Re: [PATCH 1/2] kthread: convert worker lock to raw spinlock
  2018-10-05 18:10   ` Andrea Parri
@ 2018-10-09 10:56     ` Sebastian Andrzej Siewior
  0 siblings, 0 replies; 10+ messages in thread
From: Sebastian Andrzej Siewior @ 2018-10-09 10:56 UTC (permalink / raw)
  To: Andrea Parri
  Cc: Julia Cartwright, Ingo Molnar, Thomas Gleixner, Peter Zijlstra,
	linux-kernel, linux-rt-users, Steffen Trumtrar, Tim Sander,
	Guenter Roeck

On 2018-10-05 20:10:35 [+0200], Andrea Parri wrote:
> 
> Clearly not my topic..., but out of curiosity:  What do you mean by
> "well-bounded" and "minimal"?  Can you maybe point me to some doc.?

It means that the lock is not held for an arbitrary amount of time,
such as by processing a list with a thousand items. Well-bounded means
processing no more than one or five (or so) items at a time.
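
A generic illustration of the difference (not kernel code; struct
my_item, big_list and process() are made up):

  /* Unbounded: lock hold time grows with the list length. */
  raw_spin_lock(&lock);
  list_for_each_entry_safe(item, tmp, &big_list, node)
  	process(item);			/* arbitrary amount of work */
  raw_spin_unlock(&lock);

  /* Well-bounded: take one item under the lock, do the work outside. */
  raw_spin_lock(&lock);
  item = list_first_entry_or_null(&big_list, struct my_item, node);
  if (item)
  	list_del_init(&item->node);
  raw_spin_unlock(&lock);
  if (item)
  	process(item);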

>   Andrea

Sebastian


* Re: [PATCH 1/2] kthread: convert worker lock to raw spinlock
  2018-09-28 21:03 ` [PATCH 1/2] kthread: convert worker lock to raw spinlock Julia Cartwright
  2018-10-05 16:46   ` Sebastian Andrzej Siewior
@ 2018-10-05 18:10   ` Andrea Parri
  2018-10-09 10:56     ` Sebastian Andrzej Siewior
  1 sibling, 1 reply; 10+ messages in thread
From: Andrea Parri @ 2018-10-05 18:10 UTC (permalink / raw)
  To: Julia Cartwright
  Cc: Ingo Molnar, Thomas Gleixner, Peter Zijlstra, linux-kernel,
	linux-rt-users, Steffen Trumtrar, Tim Sander,
	Sebastian Andrzej Siewior, Guenter Roeck

Hi Julia,

On Fri, Sep 28, 2018 at 09:03:51PM +0000, Julia Cartwright wrote:
> In order to enable the queuing of kthread work items from hardirq
> context even when PREEMPT_RT_FULL is enabled, convert the worker
> spin_lock to a raw_spin_lock.
> 
> This is only acceptable to do because the work performed under the lock
> is well-bounded and minimal.

Clearly not my topic..., but out of curiosity:  What do you mean by
"well-bounded" and "minimal"?  Can you maybe point me to some doc.?

  Andrea


> 
> Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> Cc: Guenter Roeck <linux@roeck-us.net>
> Reported-and-tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
> Reported-by: Tim Sander <tim@krieglstein.org>
> Signed-off-by: Julia Cartwright <julia@ni.com>
> ---
>  include/linux/kthread.h |  2 +-
>  kernel/kthread.c        | 42 ++++++++++++++++++++---------------------
>  2 files changed, 22 insertions(+), 22 deletions(-)
> 
> diff --git a/include/linux/kthread.h b/include/linux/kthread.h
> index c1961761311d..ad292898f7f2 100644
> --- a/include/linux/kthread.h
> +++ b/include/linux/kthread.h
> @@ -85,7 +85,7 @@ enum {
>  
>  struct kthread_worker {
>  	unsigned int		flags;
> -	spinlock_t		lock;
> +	raw_spinlock_t		lock;
>  	struct list_head	work_list;
>  	struct list_head	delayed_work_list;
>  	struct task_struct	*task;
> diff --git a/kernel/kthread.c b/kernel/kthread.c
> index 486dedbd9af5..c1d9ee6671c6 100644
> --- a/kernel/kthread.c
> +++ b/kernel/kthread.c
> @@ -597,7 +597,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
>  				struct lock_class_key *key)
>  {
>  	memset(worker, 0, sizeof(struct kthread_worker));
> -	spin_lock_init(&worker->lock);
> +	raw_spin_lock_init(&worker->lock);
>  	lockdep_set_class_and_name(&worker->lock, key, name);
>  	INIT_LIST_HEAD(&worker->work_list);
>  	INIT_LIST_HEAD(&worker->delayed_work_list);
> @@ -639,21 +639,21 @@ int kthread_worker_fn(void *worker_ptr)
>  
>  	if (kthread_should_stop()) {
>  		__set_current_state(TASK_RUNNING);
> -		spin_lock_irq(&worker->lock);
> +		raw_spin_lock_irq(&worker->lock);
>  		worker->task = NULL;
> -		spin_unlock_irq(&worker->lock);
> +		raw_spin_unlock_irq(&worker->lock);
>  		return 0;
>  	}
>  
>  	work = NULL;
> -	spin_lock_irq(&worker->lock);
> +	raw_spin_lock_irq(&worker->lock);
>  	if (!list_empty(&worker->work_list)) {
>  		work = list_first_entry(&worker->work_list,
>  					struct kthread_work, node);
>  		list_del_init(&work->node);
>  	}
>  	worker->current_work = work;
> -	spin_unlock_irq(&worker->lock);
> +	raw_spin_unlock_irq(&worker->lock);
>  
>  	if (work) {
>  		__set_current_state(TASK_RUNNING);
> @@ -810,12 +810,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
>  	bool ret = false;
>  	unsigned long flags;
>  
> -	spin_lock_irqsave(&worker->lock, flags);
> +	raw_spin_lock_irqsave(&worker->lock, flags);
>  	if (!queuing_blocked(worker, work)) {
>  		kthread_insert_work(worker, work, &worker->work_list);
>  		ret = true;
>  	}
> -	spin_unlock_irqrestore(&worker->lock, flags);
> +	raw_spin_unlock_irqrestore(&worker->lock, flags);
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(kthread_queue_work);
> @@ -841,7 +841,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
>  	if (WARN_ON_ONCE(!worker))
>  		return;
>  
> -	spin_lock(&worker->lock);
> +	raw_spin_lock(&worker->lock);
>  	/* Work must not be used with >1 worker, see kthread_queue_work(). */
>  	WARN_ON_ONCE(work->worker != worker);
>  
> @@ -850,7 +850,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
>  	list_del_init(&work->node);
>  	kthread_insert_work(worker, work, &worker->work_list);
>  
> -	spin_unlock(&worker->lock);
> +	raw_spin_unlock(&worker->lock);
>  }
>  EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
>  
> @@ -906,14 +906,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
>  	unsigned long flags;
>  	bool ret = false;
>  
> -	spin_lock_irqsave(&worker->lock, flags);
> +	raw_spin_lock_irqsave(&worker->lock, flags);
>  
>  	if (!queuing_blocked(worker, work)) {
>  		__kthread_queue_delayed_work(worker, dwork, delay);
>  		ret = true;
>  	}
>  
> -	spin_unlock_irqrestore(&worker->lock, flags);
> +	raw_spin_unlock_irqrestore(&worker->lock, flags);
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
> @@ -949,7 +949,7 @@ void kthread_flush_work(struct kthread_work *work)
>  	if (!worker)
>  		return;
>  
> -	spin_lock_irq(&worker->lock);
> +	raw_spin_lock_irq(&worker->lock);
>  	/* Work must not be used with >1 worker, see kthread_queue_work(). */
>  	WARN_ON_ONCE(work->worker != worker);
>  
> @@ -961,7 +961,7 @@ void kthread_flush_work(struct kthread_work *work)
>  	else
>  		noop = true;
>  
> -	spin_unlock_irq(&worker->lock);
> +	raw_spin_unlock_irq(&worker->lock);
>  
>  	if (!noop)
>  		wait_for_completion(&fwork.done);
> @@ -994,9 +994,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
>  		 * any queuing is blocked by setting the canceling counter.
>  		 */
>  		work->canceling++;
> -		spin_unlock_irqrestore(&worker->lock, *flags);
> +		raw_spin_unlock_irqrestore(&worker->lock, *flags);
>  		del_timer_sync(&dwork->timer);
> -		spin_lock_irqsave(&worker->lock, *flags);
> +		raw_spin_lock_irqsave(&worker->lock, *flags);
>  		work->canceling--;
>  	}
>  
> @@ -1043,7 +1043,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
>  	unsigned long flags;
>  	int ret = false;
>  
> -	spin_lock_irqsave(&worker->lock, flags);
> +	raw_spin_lock_irqsave(&worker->lock, flags);
>  
>  	/* Do not bother with canceling when never queued. */
>  	if (!work->worker)
> @@ -1060,7 +1060,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
>  fast_queue:
>  	__kthread_queue_delayed_work(worker, dwork, delay);
>  out:
> -	spin_unlock_irqrestore(&worker->lock, flags);
> +	raw_spin_unlock_irqrestore(&worker->lock, flags);
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
> @@ -1074,7 +1074,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
>  	if (!worker)
>  		goto out;
>  
> -	spin_lock_irqsave(&worker->lock, flags);
> +	raw_spin_lock_irqsave(&worker->lock, flags);
>  	/* Work must not be used with >1 worker, see kthread_queue_work(). */
>  	WARN_ON_ONCE(work->worker != worker);
>  
> @@ -1088,13 +1088,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
>  	 * In the meantime, block any queuing by setting the canceling counter.
>  	 */
>  	work->canceling++;
> -	spin_unlock_irqrestore(&worker->lock, flags);
> +	raw_spin_unlock_irqrestore(&worker->lock, flags);
>  	kthread_flush_work(work);
> -	spin_lock_irqsave(&worker->lock, flags);
> +	raw_spin_lock_irqsave(&worker->lock, flags);
>  	work->canceling--;
>  
>  out_fast:
> -	spin_unlock_irqrestore(&worker->lock, flags);
> +	raw_spin_unlock_irqrestore(&worker->lock, flags);
>  out:
>  	return ret;
>  }
> -- 
> 2.18.0
> 


* Re: [PATCH 1/2] kthread: convert worker lock to raw spinlock
  2018-09-28 21:03 ` [PATCH 1/2] kthread: convert worker lock to raw spinlock Julia Cartwright
@ 2018-10-05 16:46   ` Sebastian Andrzej Siewior
  2018-10-05 18:10   ` Andrea Parri
  1 sibling, 0 replies; 10+ messages in thread
From: Sebastian Andrzej Siewior @ 2018-10-05 16:46 UTC (permalink / raw)
  To: Julia Cartwright
  Cc: Ingo Molnar, Thomas Gleixner, Peter Zijlstra, linux-kernel,
	linux-rt-users, Steffen Trumtrar, Tim Sander, Guenter Roeck

On 2018-09-28 21:03:51 [+0000], Julia Cartwright wrote:
> In order to enable the queuing of kthread work items from hardirq
> context even when PREEMPT_RT_FULL is enabled, convert the worker
> spin_lock to a raw_spin_lock.
> 
> This is only acceptable to do because the work performed under the lock
> is well-bounded and minimal.
> 
> Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> Cc: Guenter Roeck <linux@roeck-us.net>
> Reported-and-tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
> Reported-by: Tim Sander <tim@krieglstein.org>
> Signed-off-by: Julia Cartwright <julia@ni.com>

Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

Sebastian


* [PATCH 1/2] kthread: convert worker lock to raw spinlock
  2018-09-28 21:03 [PATCH 0/2] Fix watchdogd wakeup deferral on RT Julia Cartwright
@ 2018-09-28 21:03 ` Julia Cartwright
  2018-10-05 16:46   ` Sebastian Andrzej Siewior
  2018-10-05 18:10   ` Andrea Parri
  0 siblings, 2 replies; 10+ messages in thread
From: Julia Cartwright @ 2018-09-28 21:03 UTC (permalink / raw)
  To: Ingo Molnar, Thomas Gleixner, Peter Zijlstra
  Cc: linux-kernel, linux-rt-users, Steffen Trumtrar, Tim Sander,
	Sebastian Andrzej Siewior, Guenter Roeck

In order to enable the queuing of kthread work items from hardirq
context even when PREEMPT_RT_FULL is enabled, convert the worker
spin_lock to a raw_spin_lock.

This is only acceptable to do because the work performed under the lock
is well-bounded and minimal.

Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Guenter Roeck <linux@roeck-us.net>
Reported-and-tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reported-by: Tim Sander <tim@krieglstein.org>
Signed-off-by: Julia Cartwright <julia@ni.com>
---
 include/linux/kthread.h |  2 +-
 kernel/kthread.c        | 42 ++++++++++++++++++++---------------------
 2 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c1961761311d..ad292898f7f2 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -85,7 +85,7 @@ enum {
 
 struct kthread_worker {
 	unsigned int		flags;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	struct list_head	work_list;
 	struct list_head	delayed_work_list;
 	struct task_struct	*task;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 486dedbd9af5..c1d9ee6671c6 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -597,7 +597,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
 				struct lock_class_key *key)
 {
 	memset(worker, 0, sizeof(struct kthread_worker));
-	spin_lock_init(&worker->lock);
+	raw_spin_lock_init(&worker->lock);
 	lockdep_set_class_and_name(&worker->lock, key, name);
 	INIT_LIST_HEAD(&worker->work_list);
 	INIT_LIST_HEAD(&worker->delayed_work_list);
@@ -639,21 +639,21 @@ int kthread_worker_fn(void *worker_ptr)
 
 	if (kthread_should_stop()) {
 		__set_current_state(TASK_RUNNING);
-		spin_lock_irq(&worker->lock);
+		raw_spin_lock_irq(&worker->lock);
 		worker->task = NULL;
-		spin_unlock_irq(&worker->lock);
+		raw_spin_unlock_irq(&worker->lock);
 		return 0;
 	}
 
 	work = NULL;
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	if (!list_empty(&worker->work_list)) {
 		work = list_first_entry(&worker->work_list,
 					struct kthread_work, node);
 		list_del_init(&work->node);
 	}
 	worker->current_work = work;
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);
 
 	if (work) {
 		__set_current_state(TASK_RUNNING);
@@ -810,12 +810,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
 	bool ret = false;
 	unsigned long flags;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	if (!queuing_blocked(worker, work)) {
 		kthread_insert_work(worker, work, &worker->work_list);
 		ret = true;
 	}
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_work);
@@ -841,7 +841,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	if (WARN_ON_ONCE(!worker))
 		return;
 
-	spin_lock(&worker->lock);
+	raw_spin_lock(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -850,7 +850,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	list_del_init(&work->node);
 	kthread_insert_work(worker, work, &worker->work_list);
 
-	spin_unlock(&worker->lock);
+	raw_spin_unlock(&worker->lock);
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 
@@ -906,14 +906,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	bool ret = false;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	if (!queuing_blocked(worker, work)) {
 		__kthread_queue_delayed_work(worker, dwork, delay);
 		ret = true;
 	}
 
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
@@ -949,7 +949,7 @@ void kthread_flush_work(struct kthread_work *work)
 	if (!worker)
 		return;
 
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -961,7 +961,7 @@ void kthread_flush_work(struct kthread_work *work)
 	else
 		noop = true;
 
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);
 
 	if (!noop)
 		wait_for_completion(&fwork.done);
@@ -994,9 +994,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
 		 * any queuing is blocked by setting the canceling counter.
 		 */
 		work->canceling++;
-		spin_unlock_irqrestore(&worker->lock, *flags);
+		raw_spin_unlock_irqrestore(&worker->lock, *flags);
 		del_timer_sync(&dwork->timer);
-		spin_lock_irqsave(&worker->lock, *flags);
+		raw_spin_lock_irqsave(&worker->lock, *flags);
 		work->canceling--;
 	}
 
@@ -1043,7 +1043,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	int ret = false;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	/* Do not bother with canceling when never queued. */
 	if (!work->worker)
@@ -1060,7 +1060,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 fast_queue:
 	__kthread_queue_delayed_work(worker, dwork, delay);
 out:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
@@ -1074,7 +1074,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	if (!worker)
 		goto out;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -1088,13 +1088,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	 * In the meantime, block any queuing by setting the canceling counter.
 	 */
 	work->canceling++;
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	kthread_flush_work(work);
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	work->canceling--;
 
 out_fast:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 out:
 	return ret;
 }
-- 
2.18.0



