* [PATCH] rtc: Avoid flush_scheduled_work() usage
@ 2022-04-19 14:27 Tetsuo Handa
2022-04-30 10:25 ` [PATCH v2] " Tetsuo Handa
0 siblings, 1 reply; 7+ messages in thread
From: Tetsuo Handa @ 2022-04-19 14:27 UTC (permalink / raw)
To: Alessandro Zummo, Alexandre Belloni; +Cc: linux-rtc
Flushing system-wide workqueues is dangerous and will be forbidden.
Replace system_wq with local rtc_wq.
Link: https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
---
This patch blindly converts schedule_work() into queue_work() within
drivers/rtc/dev.c, based on an assumption that none of work items
outside of drivers/rtc/dev.c needs to be handled by rtc_wq.
Did I convert correctly? Or, do you want to share this rtc_wq with
drivers/rtc/{interface,rtc-88pm860x,rtc-ds1305,rtc-ds1374,rtc-imxdi}.c ?
drivers/rtc/dev.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 69325aeede1a..a4a63fc5165f 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -17,6 +17,7 @@
#include "rtc-core.h"
static dev_t rtc_devt;
+static struct workqueue_struct *rtc_wq;
#define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... */
@@ -62,7 +63,7 @@ static void rtc_uie_task(struct work_struct *work)
rtc->uie_timer_active = 1;
rtc->uie_task_active = 0;
add_timer(&rtc->uie_timer);
- } else if (schedule_work(&rtc->uie_task) == 0) {
+ } else if (queue_work(rtc_wq, &rtc->uie_task) == 0) {
rtc->uie_task_active = 0;
}
spin_unlock_irq(&rtc->irq_lock);
@@ -78,7 +79,7 @@ static void rtc_uie_timer(struct timer_list *t)
spin_lock_irqsave(&rtc->irq_lock, flags);
rtc->uie_timer_active = 0;
rtc->uie_task_active = 1;
- if ((schedule_work(&rtc->uie_task) == 0))
+ if (queue_work(rtc_wq, &rtc->uie_task) == 0)
rtc->uie_task_active = 0;
spin_unlock_irqrestore(&rtc->irq_lock, flags);
}
@@ -119,7 +120,7 @@ static int set_uie(struct rtc_device *rtc)
rtc->stop_uie_polling = 0;
rtc->oldsecs = tm.tm_sec;
rtc->uie_task_active = 1;
- if (schedule_work(&rtc->uie_task) == 0)
+ if (queue_work(rtc_wq, &rtc->uie_task) == 0)
rtc->uie_task_active = 0;
}
rtc->irq_data = 0;
@@ -562,6 +563,9 @@ void __init rtc_dev_init(void)
{
int err;
+ rtc_wq = alloc_workqueue("rtc_wq", 0, 0);
+ BUG_ON(!rtc_wq);
+
err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
if (err < 0)
pr_err("failed to allocate char dev region\n");
@@ -571,4 +575,5 @@ void __exit rtc_dev_exit(void)
{
if (rtc_devt)
unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
+ destroy_workqueue(rtc_wq);
}
--
2.32.0
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH v2] rtc: Avoid flush_scheduled_work() usage
2022-04-19 14:27 [PATCH] rtc: Avoid flush_scheduled_work() usage Tetsuo Handa
@ 2022-04-30 10:25 ` Tetsuo Handa
2022-05-17 4:14 ` Tetsuo Handa
2022-05-20 14:33 ` [PATCH v3] " Tetsuo Handa
0 siblings, 2 replies; 7+ messages in thread
From: Tetsuo Handa @ 2022-04-30 10:25 UTC (permalink / raw)
To: Alessandro Zummo, Alexandre Belloni; +Cc: linux-rtc
Flushing system-wide workqueues is dangerous and will be forbidden.
Replace system_wq with local rtc_wq.
While we are at it, remove unused rtc_dev_exit().
Link: https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
---
Changes in v2:
Remove unused rtc_dev_exit().
Since rtc_dev_init() is built into vmlinux, there is no point with
recovery.
This patch blindly converts schedule_work() into queue_work() within
drivers/rtc/dev.c, based on an assumption that none of work items
outside of drivers/rtc/dev.c needs to be handled by rtc_wq.
Did I convert correctly?
drivers/rtc/dev.c | 16 +++++++---------
drivers/rtc/rtc-core.h | 5 -----
2 files changed, 7 insertions(+), 14 deletions(-)
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 69325aeede1a..aff171979a99 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -17,6 +17,7 @@
#include "rtc-core.h"
static dev_t rtc_devt;
+static struct workqueue_struct *rtc_wq;
#define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... */
@@ -62,7 +63,7 @@ static void rtc_uie_task(struct work_struct *work)
rtc->uie_timer_active = 1;
rtc->uie_task_active = 0;
add_timer(&rtc->uie_timer);
- } else if (schedule_work(&rtc->uie_task) == 0) {
+ } else if (queue_work(rtc_wq, &rtc->uie_task) == 0) {
rtc->uie_task_active = 0;
}
spin_unlock_irq(&rtc->irq_lock);
@@ -78,7 +79,7 @@ static void rtc_uie_timer(struct timer_list *t)
spin_lock_irqsave(&rtc->irq_lock, flags);
rtc->uie_timer_active = 0;
rtc->uie_task_active = 1;
- if ((schedule_work(&rtc->uie_task) == 0))
+ if (queue_work(rtc_wq, &rtc->uie_task) == 0)
rtc->uie_task_active = 0;
spin_unlock_irqrestore(&rtc->irq_lock, flags);
}
@@ -119,7 +120,7 @@ static int set_uie(struct rtc_device *rtc)
rtc->stop_uie_polling = 0;
rtc->oldsecs = tm.tm_sec;
rtc->uie_task_active = 1;
- if (schedule_work(&rtc->uie_task) == 0)
+ if (queue_work(rtc_wq, &rtc->uie_task) == 0)
rtc->uie_task_active = 0;
}
rtc->irq_data = 0;
@@ -562,13 +563,10 @@ void __init rtc_dev_init(void)
{
int err;
+ rtc_wq = alloc_workqueue("rtc_wq", 0, 0);
+ BUG_ON(!rtc_wq);
+
err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
if (err < 0)
pr_err("failed to allocate char dev region\n");
}
-
-void __exit rtc_dev_exit(void)
-{
- if (rtc_devt)
- unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
-}
diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h
index 0abf98983e13..4b10a1b8f370 100644
--- a/drivers/rtc/rtc-core.h
+++ b/drivers/rtc/rtc-core.h
@@ -2,7 +2,6 @@
#ifdef CONFIG_RTC_INTF_DEV
extern void __init rtc_dev_init(void);
-extern void __exit rtc_dev_exit(void);
extern void rtc_dev_prepare(struct rtc_device *rtc);
#else
@@ -11,10 +10,6 @@ static inline void rtc_dev_init(void)
{
}
-static inline void rtc_dev_exit(void)
-{
-}
-
static inline void rtc_dev_prepare(struct rtc_device *rtc)
{
}
--
2.34.1
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH v2] rtc: Avoid flush_scheduled_work() usage
2022-04-30 10:25 ` [PATCH v2] " Tetsuo Handa
@ 2022-05-17 4:14 ` Tetsuo Handa
2022-05-20 14:33 ` [PATCH v3] " Tetsuo Handa
1 sibling, 0 replies; 7+ messages in thread
From: Tetsuo Handa @ 2022-05-17 4:14 UTC (permalink / raw)
To: Alessandro Zummo, Alexandre Belloni; +Cc: linux-rtc
Any questions?
On 2022/04/30 19:25, Tetsuo Handa wrote:
> Flushing system-wide workqueues is dangerous and will be forbidden.
> Replace system_wq with local rtc_wq.
>
> While we are at it, remove unused rtc_dev_exit().
>
> Link: https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
> Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
> ---
> Changes in v2:
> Remove unused rtc_dev_exit().
>
> Since rtc_dev_init() is built into vmlinux, there is no point with
> recovery.
>
> This patch blindly converts schedule_work() into queue_work() within
> drivers/rtc/dev.c, based on an assumption that none of work items
> outside of drivers/rtc/dev.c needs to be handled by rtc_wq.
> Did I convert correctly?
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH v3] rtc: Avoid flush_scheduled_work() usage
2022-04-30 10:25 ` [PATCH v2] " Tetsuo Handa
2022-05-17 4:14 ` Tetsuo Handa
@ 2022-05-20 14:33 ` Tetsuo Handa
2022-05-20 16:19 ` Alexandre Belloni
1 sibling, 1 reply; 7+ messages in thread
From: Tetsuo Handa @ 2022-05-20 14:33 UTC (permalink / raw)
To: Alessandro Zummo, Alexandre Belloni; +Cc: linux-rtc
Flushing system-wide workqueues is dangerous and will be forbidden.
Replace system_wq with local rtc_wq.
While we are at it, remove unused rtc_dev_exit().
Link: https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
---
Changes in v3:
Fix build error.
Forgot to replace flush_scheduled_work().
Changes in v2:
Remove unused rtc_dev_exit().
Since rtc_dev_init() is built into vmlinux, there is no point with
recovery.
This patch blindly converts schedule_work() into queue_work() within
drivers/rtc/dev.c, based on an assumption that none of work items
outside of drivers/rtc/dev.c needs to be handled by rtc_wq.
Did I convert correctly?
drivers/rtc/dev.c | 18 ++++++++----------
drivers/rtc/rtc-core.h | 5 -----
2 files changed, 8 insertions(+), 15 deletions(-)
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 69325aeede1a..a7346f03a5b2 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -17,6 +17,7 @@
#include "rtc-core.h"
static dev_t rtc_devt;
+static struct workqueue_struct *rtc_wq;
#define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... */
@@ -62,7 +63,7 @@ static void rtc_uie_task(struct work_struct *work)
rtc->uie_timer_active = 1;
rtc->uie_task_active = 0;
add_timer(&rtc->uie_timer);
- } else if (schedule_work(&rtc->uie_task) == 0) {
+ } else if (queue_work(rtc_wq, &rtc->uie_task) == 0) {
rtc->uie_task_active = 0;
}
spin_unlock_irq(&rtc->irq_lock);
@@ -78,7 +79,7 @@ static void rtc_uie_timer(struct timer_list *t)
spin_lock_irqsave(&rtc->irq_lock, flags);
rtc->uie_timer_active = 0;
rtc->uie_task_active = 1;
- if ((schedule_work(&rtc->uie_task) == 0))
+ if (queue_work(rtc_wq, &rtc->uie_task) == 0)
rtc->uie_task_active = 0;
spin_unlock_irqrestore(&rtc->irq_lock, flags);
}
@@ -96,7 +97,7 @@ static int clear_uie(struct rtc_device *rtc)
}
if (rtc->uie_task_active) {
spin_unlock_irq(&rtc->irq_lock);
- flush_scheduled_work();
+ flush_workqueue(rtc_wq);
spin_lock_irq(&rtc->irq_lock);
}
rtc->uie_irq_active = 0;
@@ -119,7 +120,7 @@ static int set_uie(struct rtc_device *rtc)
rtc->stop_uie_polling = 0;
rtc->oldsecs = tm.tm_sec;
rtc->uie_task_active = 1;
- if (schedule_work(&rtc->uie_task) == 0)
+ if (queue_work(rtc_wq, &rtc->uie_task) == 0)
rtc->uie_task_active = 0;
}
rtc->irq_data = 0;
@@ -562,13 +563,10 @@ void __init rtc_dev_init(void)
{
int err;
+ rtc_wq = alloc_workqueue("rtc_wq", 0, 0);
+ BUG_ON(!rtc_wq);
+
err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
if (err < 0)
pr_err("failed to allocate char dev region\n");
}
-
-void __exit rtc_dev_exit(void)
-{
- if (rtc_devt)
- unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
-}
diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h
index 0abf98983e13..4b10a1b8f370 100644
--- a/drivers/rtc/rtc-core.h
+++ b/drivers/rtc/rtc-core.h
@@ -2,7 +2,6 @@
#ifdef CONFIG_RTC_INTF_DEV
extern void __init rtc_dev_init(void);
-extern void __exit rtc_dev_exit(void);
extern void rtc_dev_prepare(struct rtc_device *rtc);
#else
@@ -11,10 +10,6 @@ static inline void rtc_dev_init(void)
{
}
-static inline void rtc_dev_exit(void)
-{
-}
-
static inline void rtc_dev_prepare(struct rtc_device *rtc)
{
}
--
2.34.1
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH v3] rtc: Avoid flush_scheduled_work() usage
2022-05-20 14:33 ` [PATCH v3] " Tetsuo Handa
@ 2022-05-20 16:19 ` Alexandre Belloni
2022-06-10 10:48 ` [PATCH v4] rtc: Replace flush_scheduled_work() with flush_work() Tetsuo Handa
0 siblings, 1 reply; 7+ messages in thread
From: Alexandre Belloni @ 2022-05-20 16:19 UTC (permalink / raw)
To: Tetsuo Handa; +Cc: Alessandro Zummo, linux-rtc
Hello,
On 20/05/2022 23:33:47+0900, Tetsuo Handa wrote:
> Flushing system-wide workqueues is dangerous and will be forbidden.
> Replace system_wq with local rtc_wq.
>
> While we are at it, remove unused rtc_dev_exit().
>
> Link: https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
> Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
> ---
> Changes in v3:
> Fix build error.
> Forgot to replace flush_scheduled_work().
>
> Changes in v2:
> Remove unused rtc_dev_exit().
>
> Since rtc_dev_init() is built into vmlinux, there is no point with
> recovery.
>
> This patch blindly converts schedule_work() into queue_work() within
> drivers/rtc/dev.c, based on an assumption that none of work items
> outside of drivers/rtc/dev.c needs to be handled by rtc_wq.
> Did I convert correctly?
>
Yes and no, this could be a bit more clever and create the workqueue
only for the devices that actually need it. I worked on something after
seeing your first email a while ago but I needed more time to test it.
> drivers/rtc/dev.c | 18 ++++++++----------
> drivers/rtc/rtc-core.h | 5 -----
> 2 files changed, 8 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
> index 69325aeede1a..a7346f03a5b2 100644
> --- a/drivers/rtc/dev.c
> +++ b/drivers/rtc/dev.c
> @@ -17,6 +17,7 @@
> #include "rtc-core.h"
>
> static dev_t rtc_devt;
> +static struct workqueue_struct *rtc_wq;
>
> #define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... */
>
> @@ -62,7 +63,7 @@ static void rtc_uie_task(struct work_struct *work)
> rtc->uie_timer_active = 1;
> rtc->uie_task_active = 0;
> add_timer(&rtc->uie_timer);
> - } else if (schedule_work(&rtc->uie_task) == 0) {
> + } else if (queue_work(rtc_wq, &rtc->uie_task) == 0) {
> rtc->uie_task_active = 0;
> }
> spin_unlock_irq(&rtc->irq_lock);
> @@ -78,7 +79,7 @@ static void rtc_uie_timer(struct timer_list *t)
> spin_lock_irqsave(&rtc->irq_lock, flags);
> rtc->uie_timer_active = 0;
> rtc->uie_task_active = 1;
> - if ((schedule_work(&rtc->uie_task) == 0))
> + if (queue_work(rtc_wq, &rtc->uie_task) == 0)
> rtc->uie_task_active = 0;
> spin_unlock_irqrestore(&rtc->irq_lock, flags);
> }
> @@ -96,7 +97,7 @@ static int clear_uie(struct rtc_device *rtc)
> }
> if (rtc->uie_task_active) {
> spin_unlock_irq(&rtc->irq_lock);
> - flush_scheduled_work();
> + flush_workqueue(rtc_wq);
> spin_lock_irq(&rtc->irq_lock);
> }
> rtc->uie_irq_active = 0;
> @@ -119,7 +120,7 @@ static int set_uie(struct rtc_device *rtc)
> rtc->stop_uie_polling = 0;
> rtc->oldsecs = tm.tm_sec;
> rtc->uie_task_active = 1;
> - if (schedule_work(&rtc->uie_task) == 0)
> + if (queue_work(rtc_wq, &rtc->uie_task) == 0)
> rtc->uie_task_active = 0;
> }
> rtc->irq_data = 0;
> @@ -562,13 +563,10 @@ void __init rtc_dev_init(void)
> {
> int err;
>
> + rtc_wq = alloc_workqueue("rtc_wq", 0, 0);
> + BUG_ON(!rtc_wq);
> +
> err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
> if (err < 0)
> pr_err("failed to allocate char dev region\n");
> }
> -
> -void __exit rtc_dev_exit(void)
> -{
> - if (rtc_devt)
> - unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
This is very unrelated and should be in a different patch.
> -}
> diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h
> index 0abf98983e13..4b10a1b8f370 100644
> --- a/drivers/rtc/rtc-core.h
> +++ b/drivers/rtc/rtc-core.h
> @@ -2,7 +2,6 @@
> #ifdef CONFIG_RTC_INTF_DEV
>
> extern void __init rtc_dev_init(void);
> -extern void __exit rtc_dev_exit(void);
> extern void rtc_dev_prepare(struct rtc_device *rtc);
>
> #else
> @@ -11,10 +10,6 @@ static inline void rtc_dev_init(void)
> {
> }
>
> -static inline void rtc_dev_exit(void)
> -{
> -}
> -
> static inline void rtc_dev_prepare(struct rtc_device *rtc)
> {
> }
> --
> 2.34.1
>
>
--
Alexandre Belloni, co-owner and COO, Bootlin
Embedded Linux and Kernel engineering
https://bootlin.com
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH v4] rtc: Replace flush_scheduled_work() with flush_work().
2022-05-20 16:19 ` Alexandre Belloni
@ 2022-06-10 10:48 ` Tetsuo Handa
2022-06-24 17:14 ` Alexandre Belloni
0 siblings, 1 reply; 7+ messages in thread
From: Tetsuo Handa @ 2022-06-10 10:48 UTC (permalink / raw)
To: Alexandre Belloni, Alessandro Zummo; +Cc: linux-rtc
Since "struct rtc_device" is a per-device struct, I assume that clear_uie()
needs to wait for only one work associated with that device. Therefore,
wait for only that work using flush_work().
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
---
Please see commit c4f135d643823a86 ("workqueue: Wrap flush_workqueue()
using a macro") for background.
Changes in v4:
Use flush_work() instead of introducing a dedicated WQ.
drivers/rtc/dev.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 69325aeede1a..5cf90daf975c 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -96,7 +96,7 @@ static int clear_uie(struct rtc_device *rtc)
}
if (rtc->uie_task_active) {
spin_unlock_irq(&rtc->irq_lock);
- flush_scheduled_work();
+ flush_work(&rtc->uie_task);
spin_lock_irq(&rtc->irq_lock);
}
rtc->uie_irq_active = 0;
--
2.18.4
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH v4] rtc: Replace flush_scheduled_work() with flush_work().
2022-06-10 10:48 ` [PATCH v4] rtc: Replace flush_scheduled_work() with flush_work() Tetsuo Handa
@ 2022-06-24 17:14 ` Alexandre Belloni
0 siblings, 0 replies; 7+ messages in thread
From: Alexandre Belloni @ 2022-06-24 17:14 UTC (permalink / raw)
To: penguin-kernel, a.zummo; +Cc: linux-rtc
On Fri, 10 Jun 2022 19:48:36 +0900, Tetsuo Handa wrote:
> Since "struct rtc_device" is a per-device struct, I assume that clear_uie()
> needs to wait for only one work associated with that device. Therefore,
> wait for only that work using flush_work().
>
>
Applied, thanks!
[1/1] rtc: Replace flush_scheduled_work() with flush_work().
commit: ff30b86c76276c2f858744b8bc04b386b4a36c94
Best regards,
--
Alexandre Belloni, co-owner and COO, Bootlin
Embedded Linux and Kernel engineering
https://bootlin.com
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2022-06-24 17:15 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-04-19 14:27 [PATCH] rtc: Avoid flush_scheduled_work() usage Tetsuo Handa
2022-04-30 10:25 ` [PATCH v2] " Tetsuo Handa
2022-05-17 4:14 ` Tetsuo Handa
2022-05-20 14:33 ` [PATCH v3] " Tetsuo Handa
2022-05-20 16:19 ` Alexandre Belloni
2022-06-10 10:48 ` [PATCH v4] rtc: Replace flush_scheduled_work() with flush_work() Tetsuo Handa
2022-06-24 17:14 ` Alexandre Belloni
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.