* [PATCH v2] Adjustments: lock/unlock task in context_switch
From: rodrigosiqueira @ 2017-12-15 14:06 UTC
To: peterz; +Cc: kernel-janitors, linux-kernel
The function prepare_lock_switch has an unused parameter, and its name
is not descriptive. To improve readability and remove the extra
parameter, the following changes were made:
* Moved prepare_lock_switch from kernel/sched/sched.h to
kernel/sched/core.c, renamed it to acquire_lock_task, and removed the
unused parameter.
* Split the smp_store_release() out from finish_lock_switch() to a
function named release_lock_task.
* Comment adjustments.
Signed-off-by: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
---
kernel/sched/core.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++----
kernel/sched/sched.h | 41 -----------------------------------------
2 files changed, 48 insertions(+), 45 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 75554f366fd3..5b36a2d2b1ee 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2045,7 +2045,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* If the owning (remote) CPU is still in the middle of schedule() with
* this task as prev, wait until its done referencing the task.
*
- * Pairs with the smp_store_release() in finish_lock_switch().
+ * Pairs with the smp_store_release() in release_lock_task().
*
* This ensures that tasks getting woken will be fully ordered against
* their previous state and preserve Program Order.
@@ -2571,6 +2571,49 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
#endif /* CONFIG_PREEMPT_NOTIFIERS */
+static inline void acquire_lock_task(struct task_struct *next)
+{
+#ifdef CONFIG_SMP
+ /*
+ * Avoid task to be moved to a different CPU
+ */
+ next->on_cpu = 1;
+#endif
+}
+
+static inline void release_lock_task(struct task_struct *prev)
+{
+#ifdef CONFIG_SMP
+ /*
+ * After ->on_cpu is cleared, the task can be moved to a different CPU.
+ * We must ensure this doesn't happen until the switch is completely
+ * finished.
+ *
+ * In particular, the load of prev->state in finish_task_switch() must
+ * happen before this.
+ *
+ * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
+ */
+ smp_store_release(&prev->on_cpu, 0);
+#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ /* this is a valid case when another task releases the spinlock */
+ rq->lock.owner = current;
+#endif
+ /*
+ * If we are tracking spinlock dependencies then we have to
+ * fix up the runqueue lock - which gets 'carried over' from
+ * prev into current:
+ */
+ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+
+ raw_spin_unlock_irq(&rq->lock);
+}
+
/**
* prepare_task_switch - prepare to switch tasks
* @rq: the runqueue preparing to switch
@@ -2591,7 +2634,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
fire_sched_out_preempt_notifiers(prev, next);
- prepare_lock_switch(rq, next);
+ acquire_lock_task(next);
prepare_arch_switch(next);
}
@@ -2646,7 +2689,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* the scheduled task must drop that reference.
*
* We must observe prev->state before clearing prev->on_cpu (in
- * finish_lock_switch), otherwise a concurrent wakeup can get prev
+ * release_lock_task), otherwise a concurrent wakeup can get prev
* running on another CPU and we could race with its RUNNING -> DEAD
* transition, resulting in a double drop.
*/
@@ -2663,7 +2706,8 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* to use.
*/
smp_mb__after_unlock_lock();
- finish_lock_switch(rq, prev);
+ release_lock_task(prev);
+ finish_lock_switch(rq);
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b19552a212de..43f5d6e936bb 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1328,47 +1328,6 @@ static inline int task_on_rq_migrating(struct task_struct *p)
# define finish_arch_post_lock_switch() do { } while (0)
#endif
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
- /*
- * We can optimise this out completely for !SMP, because the
- * SMP rebalancing from interrupt is the only thing that cares
- * here.
- */
- next->on_cpu = 1;
-#endif
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
- /*
- * After ->on_cpu is cleared, the task can be moved to a different CPU.
- * We must ensure this doesn't happen until the switch is completely
- * finished.
- *
- * In particular, the load of prev->state in finish_task_switch() must
- * happen before this.
- *
- * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
- */
- smp_store_release(&prev->on_cpu, 0);
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- /* this is a valid case when another task releases the spinlock */
- rq->lock.owner = current;
-#endif
- /*
- * If we are tracking spinlock dependencies then we have to
- * fix up the runqueue lock - which gets 'carried over' from
- * prev into current:
- */
- spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-
- raw_spin_unlock_irq(&rq->lock);
-}
-
/*
* wake flags
*/
--
2.15.1
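As a side note, the ordering contract described in the comments above
can be modelled outside the kernel. The following is a minimal
userspace C11 sketch of the ->on_cpu handshake; it is an illustrative
assumption, not kernel code, and the type, function names, and the
spin loop standing in for smp_cond_load_acquire() are all invented for
the example:

#include <stdatomic.h>

/* Stand-in for the one task_struct field involved here. */
struct task {
        _Atomic int on_cpu;
};

/* Models acquire_lock_task(): claim the task before switching to it. */
static inline void model_acquire_task(struct task *next)
{
        atomic_store_explicit(&next->on_cpu, 1, memory_order_relaxed);
}

/*
 * Models release_lock_task(): the release store guarantees that every
 * access the switching CPU made to prev (such as the prev->state load
 * in finish_task_switch()) is visible to any thread whose acquire
 * load later observes on_cpu == 0.
 */
static inline void model_release_task(struct task *prev)
{
        atomic_store_explicit(&prev->on_cpu, 0, memory_order_release);
}

/*
 * Models the smp_cond_load_acquire() wait in try_to_wake_up(): a waker
 * must not run or migrate prev until the old CPU has let go of it.
 */
static inline void model_wait_for_release(struct task *p)
{
        while (atomic_load_explicit(&p->on_cpu, memory_order_acquire))
                ; /* spin until the release store is observed */
}

Any thread that sees on_cpu == 0 through the acquire load is thereby
guaranteed to also see the switching CPU's earlier writes, which is
why the wakeup path may then safely move the task.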
* Re: [PATCH v2] Adjustments: lock/unlock task in context_switch
From: Peter Zijlstra @ 2017-12-18 19:30 UTC
To: rodrigosiqueira, Ingo Molnar; +Cc: kernel-janitors, linux-kernel
Thanks; I've slightly changed it, find below. I'll queue it for the next
merge window.
---
Subject: sched: Rework / clarify prepare_lock_switch()
From: rodrigosiqueira <rodrigosiqueiramelo@gmail.com>
Date: Fri, 15 Dec 2017 12:06:03 -0200
The function prepare_lock_switch has an unused parameter, and its name
is not descriptive. To improve readability and remove the extra
parameter, the following changes were made:
* Moved prepare_lock_switch from kernel/sched/sched.h to
kernel/sched/core.c, renamed it to acquire_task, and removed the
unused parameter.
* Split the smp_store_release() out from finish_lock_switch() to a
function named release_task.
* Comment adjustments.
Signed-off-by: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20171215140603.gxe5i2y6fg5ojfpp@smtp.gmail.com
---
kernel/sched/core.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++----
kernel/sched/sched.h | 41 ---------------------------------------
2 files changed, 49 insertions(+), 45 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2045,7 +2045,7 @@ try_to_wake_up(struct task_struct *p, un
* If the owning (remote) CPU is still in the middle of schedule() with
* this task as prev, wait until its done referencing the task.
*
- * Pairs with the smp_store_release() in finish_lock_switch().
+ * Pairs with the smp_store_release() in release_task().
*
* This ensures that tasks getting woken will be fully ordered against
* their previous state and preserve Program Order.
@@ -2571,6 +2571,50 @@ fire_sched_out_preempt_notifiers(struct
#endif /* CONFIG_PREEMPT_NOTIFIERS */
+static inline void acquire_task(struct task_struct *next)
+{
+#ifdef CONFIG_SMP
+ /*
+ * Claim the task as running, we do this before switching to it
+ * such that any running task will have this set.
+ */
+ next->on_cpu = 1;
+#endif
+}
+
+static inline void release_task(struct task_struct *prev)
+{
+#ifdef CONFIG_SMP
+ /*
+ * After ->on_cpu is cleared, the task can be moved to a different CPU.
+ * We must ensure this doesn't happen until the switch is completely
+ * finished.
+ *
+ * In particular, the load of prev->state in finish_task_switch() must
+ * happen before this.
+ *
+ * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
+ */
+ smp_store_release(&prev->on_cpu, 0);
+#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ /* this is a valid case when another task releases the spinlock */
+ rq->lock.owner = current;
+#endif
+ /*
+ * If we are tracking spinlock dependencies then we have to
+ * fix up the runqueue lock - which gets 'carried over' from
+ * prev into current:
+ */
+ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+
+ raw_spin_unlock_irq(&rq->lock);
+}
+
/**
* prepare_task_switch - prepare to switch tasks
* @rq: the runqueue preparing to switch
@@ -2591,7 +2635,7 @@ prepare_task_switch(struct rq *rq, struc
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
fire_sched_out_preempt_notifiers(prev, next);
- prepare_lock_switch(rq, next);
+ acquire_task(next);
prepare_arch_switch(next);
}
@@ -2646,7 +2690,7 @@ static struct rq *finish_task_switch(str
* the scheduled task must drop that reference.
*
* We must observe prev->state before clearing prev->on_cpu (in
- * finish_lock_switch), otherwise a concurrent wakeup can get prev
+ * release_task), otherwise a concurrent wakeup can get prev
* running on another CPU and we could race with its RUNNING -> DEAD
* transition, resulting in a double drop.
*/
@@ -2663,7 +2707,8 @@ static struct rq *finish_task_switch(str
* to use.
*/
smp_mb__after_unlock_lock();
- finish_lock_switch(rq, prev);
+ release_task(prev);
+ finish_lock_switch(rq);
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1328,47 +1328,6 @@ static inline int task_on_rq_migrating(s
# define finish_arch_post_lock_switch() do { } while (0)
#endif
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
- /*
- * We can optimise this out completely for !SMP, because the
- * SMP rebalancing from interrupt is the only thing that cares
- * here.
- */
- next->on_cpu = 1;
-#endif
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
- /*
- * After ->on_cpu is cleared, the task can be moved to a different CPU.
- * We must ensure this doesn't happen until the switch is completely
- * finished.
- *
- * In particular, the load of prev->state in finish_task_switch() must
- * happen before this.
- *
- * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
- */
- smp_store_release(&prev->on_cpu, 0);
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- /* this is a valid case when another task releases the spinlock */
- rq->lock.owner = current;
-#endif
- /*
- * If we are tracking spinlock dependencies then we have to
- * fix up the runqueue lock - which gets 'carried over' from
- * prev into current:
- */
- spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-
- raw_spin_unlock_irq(&rq->lock);
-}
-
/*
* wake flags
*/
* Re: [PATCH v2] Adjustments: lock/unlock task in context_switch
From: Rodrigo Siqueira @ 2017-12-19 14:23 UTC
To: Peter Zijlstra; +Cc: Ingo Molnar, kernel-janitors, linux-kernel
Thanks for the review :)
Below I have one small comment on the changed version of the patch:
> Thanks; I've slightly changed it, find below. I'll queue it for the next
> merge window.
>
> ---
> Subject: sched: Rework / clarify prepare_lock_switch()
> From: rodrigosiqueira <rodrigosiqueiramelo@gmail.com>
> Date: Fri, 15 Dec 2017 12:06:03 -0200
>
> The function prepare_lock_switch has an unused parameter, and its name
> is not descriptive. To improve readability and remove the extra
> parameter, the following changes were made:
>
> * Moved prepare_lock_switch from kernel/sched/sched.h to
> kernel/sched/core.c, renamed it to acquire_task, and removed the
> unused parameter.
>
> * Split the smp_store_release() out from finish_lock_switch() to a
> function named release_task.
>
> * Comment adjustments.
>
> Signed-off-by: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> Link: http://lkml.kernel.org/r/20171215140603.gxe5i2y6fg5ojfpp@smtp.gmail.com
> ---
> kernel/sched/core.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++----
> kernel/sched/sched.h | 41 ---------------------------------------
> 2 files changed, 49 insertions(+), 45 deletions(-)
>
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -2045,7 +2045,7 @@ try_to_wake_up(struct task_struct *p, un
> * If the owning (remote) CPU is still in the middle of schedule() with
> * this task as prev, wait until its done referencing the task.
> *
> - * Pairs with the smp_store_release() in finish_lock_switch().
> + * Pairs with the smp_store_release() in release_task().
> *
> * This ensures that tasks getting woken will be fully ordered against
> * their previous state and preserve Program Order.
> @@ -2571,6 +2571,50 @@ fire_sched_out_preempt_notifiers(struct
>
> #endif /* CONFIG_PREEMPT_NOTIFIERS */
>
> +static inline void acquire_task(struct task_struct *next)
In the original patch, I called this function release_lock_task,
because release_task is already declared as extern in
include/linux/sched/task.h. I believe there is a function name conflict
here; is that correct?
> +{
> +#ifdef CONFIG_SMP
> + /*
> + * Claim the task as running, we do this before switching to it
> + * such that any running task will have this set.
> + */
> + next->on_cpu = 1;
> +#endif
> +}
> +
> +static inline void release_task(struct task_struct *prev)
> +{
> +#ifdef CONFIG_SMP
> + /*
> + * After ->on_cpu is cleared, the task can be moved to a different CPU.
> + * We must ensure this doesn't happen until the switch is completely
> + * finished.
> + *
> + * In particular, the load of prev->state in finish_task_switch() must
> + * happen before this.
> + *
> + * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
> + */
> + smp_store_release(&prev->on_cpu, 0);
> +#endif
> +}
> +
> +static inline void finish_lock_switch(struct rq *rq)
> +{
> +#ifdef CONFIG_DEBUG_SPINLOCK
> + /* this is a valid case when another task releases the spinlock */
> + rq->lock.owner = current;
> +#endif
> + /*
> + * If we are tracking spinlock dependencies then we have to
> + * fix up the runqueue lock - which gets 'carried over' from
> + * prev into current:
> + */
> + spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
> +
> + raw_spin_unlock_irq(&rq->lock);
> +}
> +
> /**
> * prepare_task_switch - prepare to switch tasks
> * @rq: the runqueue preparing to switch
> @@ -2591,7 +2635,7 @@ prepare_task_switch(struct rq *rq, struc
> sched_info_switch(rq, prev, next);
> perf_event_task_sched_out(prev, next);
> fire_sched_out_preempt_notifiers(prev, next);
> - prepare_lock_switch(rq, next);
> + acquire_task(next);
> prepare_arch_switch(next);
> }
>
> @@ -2646,7 +2690,7 @@ static struct rq *finish_task_switch(str
> * the scheduled task must drop that reference.
> *
> * We must observe prev->state before clearing prev->on_cpu (in
> - * finish_lock_switch), otherwise a concurrent wakeup can get prev
> + * release_task), otherwise a concurrent wakeup can get prev
> * running on another CPU and we could race with its RUNNING -> DEAD
> * transition, resulting in a double drop.
> */
> @@ -2663,7 +2707,8 @@ static struct rq *finish_task_switch(str
> * to use.
> */
> smp_mb__after_unlock_lock();
> - finish_lock_switch(rq, prev);
> + release_task(prev);
> + finish_lock_switch(rq);
> finish_arch_post_lock_switch();
>
> fire_sched_in_preempt_notifiers(current);
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1328,47 +1328,6 @@ static inline int task_on_rq_migrating(s
> # define finish_arch_post_lock_switch() do { } while (0)
> #endif
>
> -static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
> -{
> -#ifdef CONFIG_SMP
> - /*
> - * We can optimise this out completely for !SMP, because the
> - * SMP rebalancing from interrupt is the only thing that cares
> - * here.
> - */
> - next->on_cpu = 1;
> -#endif
> -}
> -
> -static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
> -{
> -#ifdef CONFIG_SMP
> - /*
> - * After ->on_cpu is cleared, the task can be moved to a different CPU.
> - * We must ensure this doesn't happen until the switch is completely
> - * finished.
> - *
> - * In particular, the load of prev->state in finish_task_switch() must
> - * happen before this.
> - *
> - * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
> - */
> - smp_store_release(&prev->on_cpu, 0);
> -#endif
> -#ifdef CONFIG_DEBUG_SPINLOCK
> - /* this is a valid case when another task releases the spinlock */
> - rq->lock.owner = current;
> -#endif
> - /*
> - * If we are tracking spinlock dependencies then we have to
> - * fix up the runqueue lock - which gets 'carried over' from
> - * prev into current:
> - */
> - spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
> -
> - raw_spin_unlock_irq(&rq->lock);
> -}
> -
> /*
> * wake flags
> */
* Re: [PATCH v2] Adjustments: lock/unlock task in context_switch
From: Peter Zijlstra @ 2017-12-19 14:38 UTC
To: Rodrigo Siqueira; +Cc: Ingo Molnar, kernel-janitors, linux-kernel
On Tue, Dec 19, 2017 at 12:23:57PM -0200, Rodrigo Siqueira wrote:
> In the original patch, I called this function release_lock_task,
> because release_task is already declared as extern in
> include/linux/sched/task.h. I believe there is a function name conflict
> here; is that correct?
Bah, you're right. Clearly I didn't compile-test it yet. I'll fix it up.
Thanks.
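For readers following along, the collision is easy to reproduce in
isolation. A hypothetical two-file C sketch (file name and elided body
invented for illustration) shows why the build would break:

/* sched_task.h -- stand-in for include/linux/sched/task.h */
struct task_struct;
extern void release_task(struct task_struct *p);

/* core.c -- stand-in for kernel/sched/core.c */
#include "sched_task.h"

/*
 * With the header above included, gcc rejects this definition with
 * something like: "error: static declaration of 'release_task'
 * follows non-static declaration".
 */
static inline void release_task(struct task_struct *prev)
{
        (void)prev; /* body elided */
}

The merged version sidesteps the clash by picking the names
prepare_task() and finish_task(), as the tip-bot commit below shows.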
* [tip:sched/core] sched/core: Rework and clarify prepare_lock_switch()
From: tip-bot for rodrigosiqueira @ 2018-01-10 12:13 UTC
To: linux-tip-commits
Cc: hpa, rodrigosiqueiramelo, tglx, torvalds, mingo, peterz, linux-kernel
Commit-ID: 31cb1bc0dc94882a588930f4d007b570c481fd17
Gitweb: https://git.kernel.org/tip/31cb1bc0dc94882a588930f4d007b570c481fd17
Author: rodrigosiqueira <rodrigosiqueiramelo@gmail.com>
AuthorDate: Fri, 15 Dec 2017 12:06:03 -0200
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 10 Jan 2018 11:30:27 +0100
sched/core: Rework and clarify prepare_lock_switch()
The prepare_lock_switch() function has an unused parameter, and its name
was not descriptive. To improve readability and remove the extra
parameter, make the following changes:
* Move prepare_lock_switch() from kernel/sched/sched.h to
kernel/sched/core.c, rename it to prepare_task(), and remove the
unused parameter.
* Split the smp_store_release() out from finish_lock_switch() to a
function named finish_task.
* Comment adjustments.
Signed-off-by: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20171215140603.gxe5i2y6fg5ojfpp@smtp.gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
kernel/sched/core.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++----
kernel/sched/sched.h | 41 ----------------------------------------
2 files changed, 49 insertions(+), 45 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 644fa2e..a794f81 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2045,7 +2045,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* If the owning (remote) CPU is still in the middle of schedule() with
* this task as prev, wait until its done referencing the task.
*
- * Pairs with the smp_store_release() in finish_lock_switch().
+ * Pairs with the smp_store_release() in finish_task().
*
* This ensures that tasks getting woken will be fully ordered against
* their previous state and preserve Program Order.
@@ -2571,6 +2571,50 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
#endif /* CONFIG_PREEMPT_NOTIFIERS */
+static inline void prepare_task(struct task_struct *next)
+{
+#ifdef CONFIG_SMP
+ /*
+ * Claim the task as running, we do this before switching to it
+ * such that any running task will have this set.
+ */
+ next->on_cpu = 1;
+#endif
+}
+
+static inline void finish_task(struct task_struct *prev)
+{
+#ifdef CONFIG_SMP
+ /*
+ * After ->on_cpu is cleared, the task can be moved to a different CPU.
+ * We must ensure this doesn't happen until the switch is completely
+ * finished.
+ *
+ * In particular, the load of prev->state in finish_task_switch() must
+ * happen before this.
+ *
+ * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
+ */
+ smp_store_release(&prev->on_cpu, 0);
+#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ /* this is a valid case when another task releases the spinlock */
+ rq->lock.owner = current;
+#endif
+ /*
+ * If we are tracking spinlock dependencies then we have to
+ * fix up the runqueue lock - which gets 'carried over' from
+ * prev into current:
+ */
+ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+
+ raw_spin_unlock_irq(&rq->lock);
+}
+
/**
* prepare_task_switch - prepare to switch tasks
* @rq: the runqueue preparing to switch
@@ -2591,7 +2635,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
fire_sched_out_preempt_notifiers(prev, next);
- prepare_lock_switch(rq, next);
+ prepare_task(next);
prepare_arch_switch(next);
}
@@ -2646,7 +2690,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* the scheduled task must drop that reference.
*
* We must observe prev->state before clearing prev->on_cpu (in
- * finish_lock_switch), otherwise a concurrent wakeup can get prev
+ * finish_task), otherwise a concurrent wakeup can get prev
* running on another CPU and we could race with its RUNNING -> DEAD
* transition, resulting in a double drop.
*/
@@ -2663,7 +2707,8 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* to use.
*/
smp_mb__after_unlock_lock();
- finish_lock_switch(rq, prev);
+ finish_task(prev);
+ finish_lock_switch(rq);
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b19552a2..43f5d6e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1328,47 +1328,6 @@ static inline int task_on_rq_migrating(struct task_struct *p)
# define finish_arch_post_lock_switch() do { } while (0)
#endif
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
- /*
- * We can optimise this out completely for !SMP, because the
- * SMP rebalancing from interrupt is the only thing that cares
- * here.
- */
- next->on_cpu = 1;
-#endif
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
- /*
- * After ->on_cpu is cleared, the task can be moved to a different CPU.
- * We must ensure this doesn't happen until the switch is completely
- * finished.
- *
- * In particular, the load of prev->state in finish_task_switch() must
- * happen before this.
- *
- * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
- */
- smp_store_release(&prev->on_cpu, 0);
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- /* this is a valid case when another task releases the spinlock */
- rq->lock.owner = current;
-#endif
- /*
- * If we are tracking spinlock dependencies then we have to
- * fix up the runqueue lock - which gets 'carried over' from
- * prev into current:
- */
- spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-
- raw_spin_unlock_irq(&rq->lock);
-}
-
/*
* wake flags
*/
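Condensed from the hunks above, the merged context-switch path ends up
calling the three helpers in this order (an outline sketch, not a
literal excerpt of kernel/sched/core.c):

/* prepare side, in prepare_task_switch() */
prepare_task(next);             /* next->on_cpu = 1 */
prepare_arch_switch(next);

/* ... the architecture code switches stacks and registers here ... */

/* finish side, in finish_task_switch() */
smp_mb__after_unlock_lock();
finish_task(prev);              /* smp_store_release(&prev->on_cpu, 0) */
finish_lock_switch(rq);         /* fix up rq->lock ownership, then
                                   raw_spin_unlock_irq(&rq->lock) */

Splitting finish_task() from finish_lock_switch() keeps the task-side
release distinct from the runqueue-lock handover, which makes the
pairing with the smp_cond_load_acquire() in try_to_wake_up() explicit.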