* [PATCH] sched: rename __prepare_to_swait() to add_swait_queue_locked()
@ 2021-03-16 11:59 Wang Qing
2021-03-17 5:12 ` Mike Galbraith
0 siblings, 1 reply; 2+ messages in thread
From: Wang Qing @ 2021-03-16 11:59 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra, Juri Lelli, Vincent Guittot,
Dietmar Eggemann, Steven Rostedt, Ben Segall, Mel Gorman,
Daniel Bristot de Oliveira, linux-kernel
Cc: Wang Qing
This function merely adds the wait entry to the queue; it does not perform an
operation analogous to prepare_to_wait() in wait.c.
Furthermore, the caller must already hold the queue lock for protection while
calling it.
Signed-off-by: Wang Qing <wangqing@vivo.com>
---
kernel/sched/completion.c | 2 +-
kernel/sched/sched.h | 2 +-
kernel/sched/swait.c | 6 +++---
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index a778554..3d28a5a
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -79,7 +79,7 @@ do_wait_for_common(struct completion *x,
timeout = -ERESTARTSYS;
break;
}
- __prepare_to_swait(&x->wait, &wait);
+ add_swait_queue_locked(&x->wait, &wait);
__set_current_state(state);
raw_spin_unlock_irq(&x->wait.lock);
timeout = action(timeout);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 10a1522..0516f50
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2719,4 +2719,4 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
#endif
void swake_up_all_locked(struct swait_queue_head *q);
-void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+void add_swait_queue_locked(struct swait_queue_head *q, struct swait_queue *wait);
diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
index 7a24925..f48a544
--- a/kernel/sched/swait.c
+++ b/kernel/sched/swait.c
@@ -82,7 +82,7 @@ void swake_up_all(struct swait_queue_head *q)
}
EXPORT_SYMBOL(swake_up_all);
-void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
+void add_swait_queue_locked(struct swait_queue_head *q, struct swait_queue *wait)
{
wait->task = current;
if (list_empty(&wait->task_list))
@@ -94,7 +94,7 @@ void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *
unsigned long flags;
raw_spin_lock_irqsave(&q->lock, flags);
- __prepare_to_swait(q, wait);
+ add_swait_queue_locked(q, wait);
set_current_state(state);
raw_spin_unlock_irqrestore(&q->lock, flags);
}
@@ -114,7 +114,7 @@ long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait
list_del_init(&wait->task_list);
ret = -ERESTARTSYS;
} else {
- __prepare_to_swait(q, wait);
+ add_swait_queue_locked(q, wait);
set_current_state(state);
}
raw_spin_unlock_irqrestore(&q->lock, flags);
--
2.7.4
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH] sched: rename __prepare_to_swait() to add_swait_queue_locked()
2021-03-16 11:59 [PATCH] sched: rename __prepare_to_swait() to add_swait_queue_locked() Wang Qing
@ 2021-03-17 5:12 ` Mike Galbraith
0 siblings, 0 replies; 2+ messages in thread
From: Mike Galbraith @ 2021-03-17 5:12 UTC (permalink / raw)
To: Wang Qing, Ingo Molnar, Peter Zijlstra, Juri Lelli,
Vincent Guittot, Dietmar Eggemann, Steven Rostedt, Ben Segall,
Mel Gorman, Daniel Bristot de Oliveira, linux-kernel
On Tue, 2021-03-16 at 19:59 +0800, Wang Qing wrote:
> This function just puts wait into queue, and does not do an operation similar
> to prepare_to_wait() in wait.c.
> And during the operation, the caller needs to hold the lock to protect.
I see zero benefit to churn like this. You're taking a dinky little
file that's perfectly clear (and pretty), and restating the obvious.
>
> Signed-off-by: Wang Qing <wangqing@vivo.com>
> ---
> kernel/sched/completion.c | 2 +-
> kernel/sched/sched.h | 2 +-
> kernel/sched/swait.c | 6 +++---
> 3 files changed, 5 insertions(+), 5 deletions(-)
>
> diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
> index a778554..3d28a5a
> --- a/kernel/sched/completion.c
> +++ b/kernel/sched/completion.c
> @@ -79,7 +79,7 @@ do_wait_for_common(struct completion *x,
> timeout = -ERESTARTSYS;
> break;
> }
> - __prepare_to_swait(&x->wait, &wait);
> + add_swait_queue_locked(&x->wait, &wait);
> __set_current_state(state);
> raw_spin_unlock_irq(&x->wait.lock);
> timeout = action(timeout);
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 10a1522..0516f50
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -2719,4 +2719,4 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
> #endif
>
> void swake_up_all_locked(struct swait_queue_head *q);
> -void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
> +void add_swait_queue_locked(struct swait_queue_head *q, struct swait_queue *wait);
> diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
> index 7a24925..f48a544
> --- a/kernel/sched/swait.c
> +++ b/kernel/sched/swait.c
> @@ -82,7 +82,7 @@ void swake_up_all(struct swait_queue_head *q)
> }
> EXPORT_SYMBOL(swake_up_all);
>
> -void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
> +void add_swait_queue_locked(struct swait_queue_head *q, struct swait_queue *wait)
> {
> wait->task = current;
> if (list_empty(&wait->task_list))
> @@ -94,7 +94,7 @@ void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *
> unsigned long flags;
>
> raw_spin_lock_irqsave(&q->lock, flags);
> - __prepare_to_swait(q, wait);
> + add_swait_queue_locked(q, wait);
> set_current_state(state);
> raw_spin_unlock_irqrestore(&q->lock, flags);
> }
> @@ -114,7 +114,7 @@ long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait
> list_del_init(&wait->task_list);
> ret = -ERESTARTSYS;
> } else {
> - __prepare_to_swait(q, wait);
> + add_swait_queue_locked(q, wait);
> set_current_state(state);
> }
> raw_spin_unlock_irqrestore(&q->lock, flags);
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2021-03-17 5:14 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-03-16 11:59 [PATCH] sched: rename __prepare_to_swait() to add_swait_queue_locked() Wang Qing
2021-03-17 5:12 ` Mike Galbraith
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).