* [patch V5 0/3] smp: Make softirq handling RT friendly
From: Thomas Gleixner @ 2022-04-13 13:31 UTC
  To: LKML; +Cc: Christoph Hellwig, Peter Zijlstra, Sebastian Andrzej Siewior

The invocation of do_softirq() in flush_smp_call_function_from_idle()
breaks on RT because RT does not provide do_softirq().

Aside from that, flush_smp_call_function_from_idle() is a misnomer, since the
function is invoked not only from idle, but also from the scheduler migration
thread.
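
Condensed, the problematic pattern looks like this (a simplified sketch of
the current code; the cfd_seq tracing and the llist_empty() early return are
omitted):

	void flush_smp_call_function_from_idle(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		/* Run the queued SMP function call callbacks. */
		flush_smp_call_function_queue(true);
		/*
		 * A callback may have raised a soft interrupt. Handling it
		 * here requires do_softirq(), which PREEMPT_RT does not
		 * provide.
		 */
		if (local_softirq_pending())
			do_softirq();
		local_irq_restore(flags);
	}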

The following series addresses this. Changes vs. V4, which can be found
here: https://lore.kernel.org/lkml/20220412203649.956258017@linutronix.de

  - Fix a boatload of missing prototype warnings related to the
    scheduler and its header files.

Thanks,

	tglx
---
 include/linux/interrupt.h    |    9 +++++++++
 include/linux/sched.h        |    2 ++
 kernel/sched/build_policy.c  |    2 ++
 kernel/sched/build_utility.c |    1 +
 kernel/sched/core.c          |    5 ++++-
 kernel/sched/deadline.c      |    2 --
 kernel/sched/fair.c          |    1 +
 kernel/sched/idle.c          |    2 +-
 kernel/sched/sched.h         |    8 ++------
 kernel/sched/smp.h           |    6 ++++++
 kernel/smp.c                 |   32 ++++++++++++++++++++++++--------
 kernel/softirq.c             |   13 +++++++++++++
 kernel/stop_machine.c        |    2 --
 13 files changed, 65 insertions(+), 20 deletions(-)




* [patch V5 1/3] sched: Fix missing prototype warnings
From: Thomas Gleixner @ 2022-04-13 13:31 UTC
  To: LKML
  Cc: Christoph Hellwig, Peter Zijlstra, Sebastian Andrzej Siewior,
	kernel test robot

A W=1 build emits more than a dozen missing prototype warnings related to
the scheduler and scheduler-specific includes.
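
For illustration, the class of warning involved (foo() is a made-up example,
not one of the actual instances):

	/* foo.c -- no prototype visible at the definition */
	void foo(void) { }
	/* W=1: warning: no previous prototype for 'foo' [-Wmissing-prototypes] */

The fix is to make one prototype visible to both the definition and its
users, typically from a shared header:

	/* foo.h */
	extern void foo(void);

	/* foo.c */
	#include "foo.h"
	void foo(void) { }	/* prototype in scope, no warning */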

Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V5: New patch
---
 include/linux/sched.h        |    2 ++
 kernel/sched/build_policy.c  |    2 ++
 kernel/sched/build_utility.c |    1 +
 kernel/sched/core.c          |    3 +++
 kernel/sched/deadline.c      |    2 --
 kernel/sched/fair.c          |    1 +
 kernel/sched/sched.h         |    8 ++------
 kernel/sched/smp.h           |    6 ++++++
 kernel/stop_machine.c        |    2 --
 9 files changed, 17 insertions(+), 10 deletions(-)

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2361,4 +2361,6 @@ static inline void sched_core_free(struc
 static inline void sched_core_fork(struct task_struct *p) { }
 #endif
 
+extern void sched_set_stop_task(int cpu, struct task_struct *stop);
+
 #endif
--- a/kernel/sched/build_policy.c
+++ b/kernel/sched/build_policy.c
@@ -15,6 +15,7 @@
 /* Headers: */
 #include <linux/sched/clock.h>
 #include <linux/sched/cputime.h>
+#include <linux/sched/hotplug.h>
 #include <linux/sched/posix-timers.h>
 #include <linux/sched/rt.h>
 
@@ -31,6 +32,7 @@
 #include <uapi/linux/sched/types.h>
 
 #include "sched.h"
+#include "smp.h"
 
 #include "autogroup.h"
 #include "stats.h"
--- a/kernel/sched/build_utility.c
+++ b/kernel/sched/build_utility.c
@@ -14,6 +14,7 @@
 #include <linux/sched/debug.h>
 #include <linux/sched/isolation.h>
 #include <linux/sched/loadavg.h>
+#include <linux/sched/nohz.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/rseq_api.h>
 #include <linux/sched/task_stack.h>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -26,7 +26,10 @@
 #include <linux/topology.h>
 #include <linux/sched/clock.h>
 #include <linux/sched/cond_resched.h>
+#include <linux/sched/cputime.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/init.h>
 #include <linux/sched/isolation.h>
 #include <linux/sched/loadavg.h>
 #include <linux/sched/mm.h>
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1220,8 +1220,6 @@ int dl_runtime_exceeded(struct sched_dl_
 	return (dl_se->runtime <= 0);
 }
 
-extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
-
 /*
  * This function implements the GRUB accounting rule:
  * according to the GRUB reclaiming algorithm, the runtime is
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -36,6 +36,7 @@
 #include <linux/sched/cond_resched.h>
 #include <linux/sched/cputime.h>
 #include <linux/sched/isolation.h>
+#include <linux/sched/nohz.h>
 
 #include <linux/cpuidle.h>
 #include <linux/interrupt.h>
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1827,12 +1827,7 @@ static inline void dirty_sched_domain_sy
 #endif
 
 extern int sched_update_scaling(void);
-
-extern void flush_smp_call_function_from_idle(void);
-
-#else /* !CONFIG_SMP: */
-static inline void flush_smp_call_function_from_idle(void) { }
-#endif
+#endif /* CONFIG_SMP */
 
 #include "stats.h"
 
@@ -2309,6 +2304,7 @@ extern void resched_cpu(int cpu);
 
 extern struct rt_bandwidth def_rt_bandwidth;
 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
 
 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
--- a/kernel/sched/smp.h
+++ b/kernel/sched/smp.h
@@ -7,3 +7,9 @@
 extern void sched_ttwu_pending(void *arg);
 
 extern void send_call_function_single_ipi(int cpu);
+
+#ifdef CONFIG_SMP
+extern void flush_smp_call_function_from_idle(void);
+#else
+static inline void flush_smp_call_function_from_idle(void) { }
+#endif
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -535,8 +535,6 @@ void stop_machine_park(int cpu)
 	kthread_park(stopper->thread);
 }
 
-extern void sched_set_stop_task(int cpu, struct task_struct *stop);
-
 static void cpu_stop_create(unsigned int cpu)
 {
 	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));



* [patch V5 2/3] smp: Rename flush_smp_call_function_from_idle()
From: Thomas Gleixner @ 2022-04-13 13:31 UTC
  To: LKML; +Cc: Christoph Hellwig, Peter Zijlstra, Sebastian Andrzej Siewior

This is invoked from the stopper thread too, which is definitely not idle.
Rename it to flush_smp_call_function_queue() and fix up the callers.
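
Condensed from the diff below, the resulting shape: the static helper keeps
the double-underscore internal name, and the renamed task-context entry
point wraps it:

	/* Internal: the caller must have interrupts disabled. */
	static void __flush_smp_call_function_queue(bool warn_cpu_offline);

	/* Task-context entry point (idle task, migration thread). */
	void flush_smp_call_function_queue(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		__flush_smp_call_function_queue(true);
		if (local_softirq_pending())
			do_softirq();
		local_irq_restore(flags);
	}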

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V4: New patch
---
 kernel/sched/core.c |    2 +-
 kernel/sched/idle.c |    2 +-
 kernel/sched/smp.h  |    4 ++--
 kernel/smp.c        |   27 ++++++++++++++++++++-------
 4 files changed, 24 insertions(+), 11 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2411,7 +2411,7 @@ static int migration_cpu_stop(void *data
 	 * __migrate_task() such that we will not miss enforcing cpus_ptr
 	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
 	 */
-	flush_smp_call_function_from_idle();
+	flush_smp_call_function_queue();
 
 	raw_spin_lock(&p->pi_lock);
 	rq_lock(rq, &rf);
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -327,7 +327,7 @@ static void do_idle(void)
 	 * RCU relies on this call to be done outside of an RCU read-side
 	 * critical section.
 	 */
-	flush_smp_call_function_from_idle();
+	flush_smp_call_function_queue();
 	schedule_idle();
 
 	if (unlikely(klp_patch_pending(current)))
--- a/kernel/sched/smp.h
+++ b/kernel/sched/smp.h
@@ -9,7 +9,7 @@ extern void sched_ttwu_pending(void *arg
 extern void send_call_function_single_ipi(int cpu);
 
 #ifdef CONFIG_SMP
-extern void flush_smp_call_function_from_idle(void);
+extern void flush_smp_call_function_queue(void);
 #else
-static inline void flush_smp_call_function_from_idle(void) { }
+static inline void flush_smp_call_function_queue(void) { }
 #endif
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -96,7 +96,7 @@ static DEFINE_PER_CPU_ALIGNED(struct cal
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
-static void flush_smp_call_function_queue(bool warn_cpu_offline);
+static void __flush_smp_call_function_queue(bool warn_cpu_offline);
 
 int smpcfd_prepare_cpu(unsigned int cpu)
 {
@@ -141,7 +141,7 @@ int smpcfd_dying_cpu(unsigned int cpu)
 	 * ensure that the outgoing CPU doesn't go offline with work
 	 * still pending.
 	 */
-	flush_smp_call_function_queue(false);
+	__flush_smp_call_function_queue(false);
 	irq_work_run();
 	return 0;
 }
@@ -541,11 +541,11 @@ void generic_smp_call_function_single_in
 {
 	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
 		      smp_processor_id(), CFD_SEQ_GOTIPI);
-	flush_smp_call_function_queue(true);
+	__flush_smp_call_function_queue(true);
 }
 
 /**
- * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
+ * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
  *
  * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
  *		      offline CPU. Skip this check if set to 'false'.
@@ -558,7 +558,7 @@ void generic_smp_call_function_single_in
  * Loop through the call_single_queue and run all the queued callbacks.
  * Must be called with interrupts disabled.
  */
-static void flush_smp_call_function_queue(bool warn_cpu_offline)
+static void __flush_smp_call_function_queue(bool warn_cpu_offline)
 {
 	call_single_data_t *csd, *csd_next;
 	struct llist_node *entry, *prev;
@@ -681,7 +681,20 @@ static void flush_smp_call_function_queu
 		      smp_processor_id(), CFD_SEQ_HDLEND);
 }
 
-void flush_smp_call_function_from_idle(void)
+
+/**
+ * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
+ *				   from task context (idle, migration thread)
+ *
+ * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
+ * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
+ * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
+ * handle queued SMP function calls before scheduling.
+ *
+ * The migration thread has to ensure that an eventually pending wakeup has
+ * been handled before it migrates a task.
+ */
+void flush_smp_call_function_queue(void)
 {
 	unsigned long flags;
 
@@ -691,7 +704,7 @@ void flush_smp_call_function_from_idle(v
 	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
 		      smp_processor_id(), CFD_SEQ_IDLE);
 	local_irq_save(flags);
-	flush_smp_call_function_queue(true);
+	__flush_smp_call_function_queue(true);
 	if (local_softirq_pending())
 		do_softirq();
 


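The kernel-doc above refers to the TIF_POLLING_NRFLAG optimization. For
context, the sending side looks roughly like this (a simplified sketch based
on the existing set_nr_if_polling() machinery, not the exact kernel code):

	void send_call_function_single_ipi(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);

		/*
		 * If the target CPU is idle and polling on TIF_NEED_RESCHED,
		 * setting that flag is sufficient: the idle task will notice
		 * it, flush the call function queue and reschedule. No IPI
		 * required.
		 */
		if (!set_nr_if_polling(rq->idle))
			arch_send_call_function_single_ipi(cpu);
	}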

* [patch V5 3/3] smp: Make softirq handling RT safe in flush_smp_call_function_queue()
From: Thomas Gleixner @ 2022-04-13 13:31 UTC
  To: LKML; +Cc: Christoph Hellwig, Peter Zijlstra, Sebastian Andrzej Siewior

From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

flush_smp_call_function_queue() invokes do_softirq(), which is not available
on PREEMPT_RT. flush_smp_call_function_queue() is invoked from the idle task
and the migration task with preemption or interrupts disabled.

RT kernels therefore cannot process soft interrupts in that context: soft
interrupt handling has to acquire 'sleeping spinlocks', which is not possible
with preemption or interrupts disabled and is forbidden from the idle task
anyway.

The only currently known SMP function call which raises a soft interrupt is
in the block layer, but this functionality is not enabled on RT kernels for
latency and performance reasons.

RT could wake up ksoftirqd unconditionally, but this should be avoided when
soft interrupts were already pending at the point where this is invoked in
the context of the migration task. The migration task might have preempted a
threaded interrupt handler which raised a soft interrupt, but did not yet
reach the local_bh_enable() which would process it. A "running" ksoftirqd
would then prevent the handling in the interrupt thread context, which
causes latency issues.

Add a new function which handles this case explicitly for RT and falls back
to do_softirq() on !RT kernels. In the RT case this warns when one of the
flushed SMP function calls raised a soft interrupt, so the situation can be
investigated.
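
Condensed, the resulting flow (a simplified sketch of the change below):

	void flush_smp_call_function_queue(void)
	{
		unsigned int was_pending;
		unsigned long flags;

		local_irq_save(flags);
		/* Snapshot the soft interrupts pending on entry. */
		was_pending = local_softirq_pending();
		__flush_smp_call_function_queue(true);
		if (local_softirq_pending())
			/*
			 * !RT: plain do_softirq(). RT: warn if a flushed
			 * callback raised a new soft interrupt and defer
			 * it to ksoftirqd.
			 */
			do_softirq_post_smp_call_flush(was_pending);
		local_irq_restore(flags);
	}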

[ tglx: Moved the RT part out of SMP code ]

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/YgKgL6aPj8aBES6G@linutronix.de
---
v4:
  - Move the RT logic into softirq.c, which also avoids the wakeup
    when soft interrupts are disabled. The enable will handle
    them anyway.
v3:
  - Only wake ksoftirqd if the softirqs were raised within
    flush_smp_call_function_queue().
  - Add a warning in the wake case.
v2: Drop an empty line.

 include/linux/interrupt.h |    9 +++++++++
 kernel/smp.c              |    5 ++++-
 kernel/softirq.c          |   13 +++++++++++++
 3 files changed, 26 insertions(+), 1 deletion(-)
---

--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -607,6 +607,15 @@ struct softirq_action
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
 
+#ifdef CONFIG_PREEMPT_RT
+extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
+#else
+static inline void do_softirq_post_smp_call_flush(unsigned int unused)
+{
+	do_softirq();
+}
+#endif
+
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
 extern void __raise_softirq_irqoff(unsigned int nr);
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -696,6 +696,7 @@ static void __flush_smp_call_function_qu
  */
 void flush_smp_call_function_queue(void)
 {
+	unsigned int was_pending;
 	unsigned long flags;
 
 	if (llist_empty(this_cpu_ptr(&call_single_queue)))
@@ -704,9 +705,11 @@ void flush_smp_call_function_queue(void)
 	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
 		      smp_processor_id(), CFD_SEQ_IDLE);
 	local_irq_save(flags);
+	/* Get the already pending soft interrupts for RT enabled kernels */
+	was_pending = local_softirq_pending();
 	__flush_smp_call_function_queue(true);
 	if (local_softirq_pending())
-		do_softirq();
+		do_softirq_post_smp_call_flush(was_pending);
 
 	local_irq_restore(flags);
 }
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -294,6 +294,19 @@ static inline void invoke_softirq(void)
 		wakeup_softirqd();
 }
 
+/*
+ * flush_smp_call_function_queue() can raise a soft interrupt in a function
+ * call. On RT kernels this is undesired and the only known functionality
+ * in the block layer which does this is disabled on RT. If soft interrupts
+ * get raised which haven't been raised before the flush, warn so it can be
+ * investigated.
+ */
+void softirq_post_smp_call_flush(unsigned int was_pending)
+{
+	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
+		invoke_softirq();
+}
+
 #else /* CONFIG_PREEMPT_RT */
 
 /*



* Re: [patch V5 3/3] smp: Make softirq handling RT safe in flush_smp_call_function_queue()
From: Sebastian Andrzej Siewior @ 2022-04-20 15:56 UTC
  To: Thomas Gleixner; +Cc: LKML, Christoph Hellwig, Peter Zijlstra

On 2022-04-13 15:31:05 [+0200], Thomas Gleixner wrote:
…
> +#ifdef CONFIG_PREEMPT_RT
> +extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
> +#else
> +static inline void do_softirq_post_smp_call_flush(unsigned int unused)
> +{
> +	do_softirq();
> +}
> +#endif
> +
> +void softirq_post_smp_call_flush(unsigned int was_pending)
> +{
> +	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
> +		invoke_softirq();
> +}
> +
>  #else /* CONFIG_PREEMPT_RT */

fold, please.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/softirq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 1682586a69139..5b36ebe5e20de 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -301,7 +301,7 @@ static inline void invoke_softirq(void)
  * get raised which haven't been raised before the flush, warn so it can be
  * investigated.
  */
-void softirq_post_smp_call_flush(unsigned int was_pending)
+void do_softirq_post_smp_call_flush(unsigned int was_pending)
 {
 	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
 		invoke_softirq();

Sebastian


* Re: [patch V5 0/3] smp: Make softirq handling RT friendly
From: Peter Zijlstra @ 2022-04-25 12:55 UTC
  To: Thomas Gleixner; +Cc: LKML, Christoph Hellwig, Sebastian Andrzej Siewior

On Wed, Apr 13, 2022 at 03:31:00PM +0200, Thomas Gleixner wrote:
>  include/linux/interrupt.h    |    9 +++++++++
>  include/linux/sched.h        |    2 ++
>  kernel/sched/build_policy.c  |    2 ++
>  kernel/sched/build_utility.c |    1 +
>  kernel/sched/core.c          |    5 ++++-
>  kernel/sched/deadline.c      |    2 --
>  kernel/sched/fair.c          |    1 +
>  kernel/sched/idle.c          |    2 +-
>  kernel/sched/sched.h         |    8 ++------
>  kernel/sched/smp.h           |    6 ++++++
>  kernel/smp.c                 |   32 ++++++++++++++++++++++++--------
>  kernel/softirq.c             |   13 +++++++++++++
>  kernel/stop_machine.c        |    2 --
>  13 files changed, 65 insertions(+), 20 deletions(-)

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>


* [tip: sched/core] smp: Rename flush_smp_call_function_from_idle()
From: tip-bot2 for Thomas Gleixner @ 2022-05-01  8:05 UTC
  To: linux-tip-commits
  Cc: Thomas Gleixner, Peter Zijlstra (Intel), x86, linux-kernel

The following commit has been merged into the sched/core branch of tip:

Commit-ID:     16bf5a5e1ec56474ed2a19d72f272ed09a5d3ea1
Gitweb:        https://git.kernel.org/tip/16bf5a5e1ec56474ed2a19d72f272ed09a5d3ea1
Author:        Thomas Gleixner <tglx@linutronix.de>
AuthorDate:    Wed, 13 Apr 2022 15:31:03 +02:00
Committer:     Thomas Gleixner <tglx@linutronix.de>
CommitterDate: Sun, 01 May 2022 10:03:43 +02:00

smp: Rename flush_smp_call_function_from_idle()

This is invoked from the stopper thread too, which is definitely not idle.
Rename it to flush_smp_call_function_queue() and fix up the callers.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220413133024.305001096@linutronix.de

---
 kernel/sched/core.c |  2 +-
 kernel/sched/idle.c |  2 +-
 kernel/sched/smp.h  |  4 ++--
 kernel/smp.c        | 27 ++++++++++++++++++++-------
 4 files changed, 24 insertions(+), 11 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e644578..07bacb0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2411,7 +2411,7 @@ static int migration_cpu_stop(void *data)
 	 * __migrate_task() such that we will not miss enforcing cpus_ptr
 	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
 	 */
-	flush_smp_call_function_from_idle();
+	flush_smp_call_function_queue();
 
 	raw_spin_lock(&p->pi_lock);
 	rq_lock(rq, &rf);
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8f8b502..60295db 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -327,7 +327,7 @@ static void do_idle(void)
 	 * RCU relies on this call to be done outside of an RCU read-side
 	 * critical section.
 	 */
-	flush_smp_call_function_from_idle();
+	flush_smp_call_function_queue();
 	schedule_idle();
 
 	if (unlikely(klp_patch_pending(current)))
diff --git a/kernel/sched/smp.h b/kernel/sched/smp.h
index 5719bf9..2eb23dd 100644
--- a/kernel/sched/smp.h
+++ b/kernel/sched/smp.h
@@ -9,7 +9,7 @@ extern void sched_ttwu_pending(void *arg);
 extern void send_call_function_single_ipi(int cpu);
 
 #ifdef CONFIG_SMP
-extern void flush_smp_call_function_from_idle(void);
+extern void flush_smp_call_function_queue(void);
 #else
-static inline void flush_smp_call_function_from_idle(void) { }
+static inline void flush_smp_call_function_queue(void) { }
 #endif
diff --git a/kernel/smp.c b/kernel/smp.c
index 01a7c17..8e85f22 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -96,7 +96,7 @@ static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
-static void flush_smp_call_function_queue(bool warn_cpu_offline);
+static void __flush_smp_call_function_queue(bool warn_cpu_offline);
 
 int smpcfd_prepare_cpu(unsigned int cpu)
 {
@@ -141,7 +141,7 @@ int smpcfd_dying_cpu(unsigned int cpu)
 	 * ensure that the outgoing CPU doesn't go offline with work
 	 * still pending.
 	 */
-	flush_smp_call_function_queue(false);
+	__flush_smp_call_function_queue(false);
 	irq_work_run();
 	return 0;
 }
@@ -541,11 +541,11 @@ void generic_smp_call_function_single_interrupt(void)
 {
 	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
 		      smp_processor_id(), CFD_SEQ_GOTIPI);
-	flush_smp_call_function_queue(true);
+	__flush_smp_call_function_queue(true);
 }
 
 /**
- * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
+ * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
  *
  * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
  *		      offline CPU. Skip this check if set to 'false'.
@@ -558,7 +558,7 @@ void generic_smp_call_function_single_interrupt(void)
  * Loop through the call_single_queue and run all the queued callbacks.
  * Must be called with interrupts disabled.
  */
-static void flush_smp_call_function_queue(bool warn_cpu_offline)
+static void __flush_smp_call_function_queue(bool warn_cpu_offline)
 {
 	call_single_data_t *csd, *csd_next;
 	struct llist_node *entry, *prev;
@@ -681,7 +681,20 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 		      smp_processor_id(), CFD_SEQ_HDLEND);
 }
 
-void flush_smp_call_function_from_idle(void)
+
+/**
+ * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
+ *				   from task context (idle, migration thread)
+ *
+ * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
+ * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
+ * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
+ * handle queued SMP function calls before scheduling.
+ *
+ * The migration thread has to ensure that an eventually pending wakeup has
+ * been handled before it migrates a task.
+ */
+void flush_smp_call_function_queue(void)
 {
 	unsigned long flags;
 
@@ -691,7 +704,7 @@ void flush_smp_call_function_from_idle(void)
 	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
 		      smp_processor_id(), CFD_SEQ_IDLE);
 	local_irq_save(flags);
-	flush_smp_call_function_queue(true);
+	__flush_smp_call_function_queue(true);
 	if (local_softirq_pending())
 		do_softirq();
 


* [tip: sched/core] sched: Fix missing prototype warnings
From: tip-bot2 for Thomas Gleixner @ 2022-05-01  8:05 UTC
  To: linux-tip-commits
  Cc: kernel test robot, Thomas Gleixner, Peter Zijlstra (Intel),
	x86, linux-kernel

The following commit has been merged into the sched/core branch of tip:

Commit-ID:     d664e399128bd78b905ff480917e2c2d4949e101
Gitweb:        https://git.kernel.org/tip/d664e399128bd78b905ff480917e2c2d4949e101
Author:        Thomas Gleixner <tglx@linutronix.de>
AuthorDate:    Wed, 13 Apr 2022 15:31:02 +02:00
Committer:     Thomas Gleixner <tglx@linutronix.de>
CommitterDate: Sun, 01 May 2022 10:03:43 +02:00

sched: Fix missing prototype warnings

A W=1 build emits more than a dozen missing prototype warnings related to
the scheduler and scheduler-specific includes.

Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220413133024.249118058@linutronix.de
---
 include/linux/sched.h        | 2 ++
 kernel/sched/build_policy.c  | 2 ++
 kernel/sched/build_utility.c | 1 +
 kernel/sched/core.c          | 3 +++
 kernel/sched/deadline.c      | 2 --
 kernel/sched/fair.c          | 1 +
 kernel/sched/sched.h         | 8 ++------
 kernel/sched/smp.h           | 6 ++++++
 kernel/stop_machine.c        | 2 --
 9 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index fc74ea2..a27316f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2388,4 +2388,6 @@ static inline void sched_core_free(struct task_struct *tsk) { }
 static inline void sched_core_fork(struct task_struct *p) { }
 #endif
 
+extern void sched_set_stop_task(int cpu, struct task_struct *stop);
+
 #endif
diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
index e0104b4..d9dc9ab 100644
--- a/kernel/sched/build_policy.c
+++ b/kernel/sched/build_policy.c
@@ -15,6 +15,7 @@
 /* Headers: */
 #include <linux/sched/clock.h>
 #include <linux/sched/cputime.h>
+#include <linux/sched/hotplug.h>
 #include <linux/sched/posix-timers.h>
 #include <linux/sched/rt.h>
 
@@ -31,6 +32,7 @@
 #include <uapi/linux/sched/types.h>
 
 #include "sched.h"
+#include "smp.h"
 
 #include "autogroup.h"
 #include "stats.h"
diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
index eec0849..99bdd96 100644
--- a/kernel/sched/build_utility.c
+++ b/kernel/sched/build_utility.c
@@ -14,6 +14,7 @@
 #include <linux/sched/debug.h>
 #include <linux/sched/isolation.h>
 #include <linux/sched/loadavg.h>
+#include <linux/sched/nohz.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/rseq_api.h>
 #include <linux/sched/task_stack.h>
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 068c088..e644578 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -26,7 +26,10 @@
 #include <linux/topology.h>
 #include <linux/sched/clock.h>
 #include <linux/sched/cond_resched.h>
+#include <linux/sched/cputime.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/init.h>
 #include <linux/sched/isolation.h>
 #include <linux/sched/loadavg.h>
 #include <linux/sched/mm.h>
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index fb4255a..6ae4236 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1220,8 +1220,6 @@ int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
 	return (dl_se->runtime <= 0);
 }
 
-extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
-
 /*
  * This function implements the GRUB accounting rule:
  * according to the GRUB reclaiming algorithm, the runtime is
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6ca054b..bc9f6e9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -36,6 +36,7 @@
 #include <linux/sched/cond_resched.h>
 #include <linux/sched/cputime.h>
 #include <linux/sched/isolation.h>
+#include <linux/sched/nohz.h>
 
 #include <linux/cpuidle.h>
 #include <linux/interrupt.h>
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 762be73..4784898 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1833,12 +1833,7 @@ static inline void dirty_sched_domain_sysctl(int cpu)
 #endif
 
 extern int sched_update_scaling(void);
-
-extern void flush_smp_call_function_from_idle(void);
-
-#else /* !CONFIG_SMP: */
-static inline void flush_smp_call_function_from_idle(void) { }
-#endif
+#endif /* CONFIG_SMP */
 
 #include "stats.h"
 
@@ -2315,6 +2310,7 @@ extern void resched_cpu(int cpu);
 
 extern struct rt_bandwidth def_rt_bandwidth;
 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
 
 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
diff --git a/kernel/sched/smp.h b/kernel/sched/smp.h
index 9620e32..5719bf9 100644
--- a/kernel/sched/smp.h
+++ b/kernel/sched/smp.h
@@ -7,3 +7,9 @@
 extern void sched_ttwu_pending(void *arg);
 
 extern void send_call_function_single_ipi(int cpu);
+
+#ifdef CONFIG_SMP
+extern void flush_smp_call_function_from_idle(void);
+#else
+static inline void flush_smp_call_function_from_idle(void) { }
+#endif
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index cbc3027..6da7b91 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -535,8 +535,6 @@ void stop_machine_park(int cpu)
 	kthread_park(stopper->thread);
 }
 
-extern void sched_set_stop_task(int cpu, struct task_struct *stop);
-
 static void cpu_stop_create(unsigned int cpu)
 {
 	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));

