From: "tip-bot2 for Nadav Amit" <tip-bot2@linutronix.de>
To: linux-tip-commits@vger.kernel.org
Cc: Peter Zijlstra <peterz@infradead.org>,
	Nadav Amit <namit@vmware.com>, Ingo Molnar <mingo@kernel.org>,
	x86@kernel.org, linux-kernel@vger.kernel.org
Subject: [tip: x86/mm] smp: Inline on_each_cpu_cond() and on_each_cpu()
Date: Sat, 06 Mar 2021 12:12:53 -0000
Message-ID: <161503277381.398.16737252451693471719.tip-bot2@tip-bot2>
In-Reply-To: <20210220231712.2475218-10-namit@vmware.com>

The following commit has been merged into the x86/mm branch of tip:

Commit-ID:     a5aa5ce300597224ec76dacc8e63ba3ad7a18bbd
Gitweb:        https://git.kernel.org/tip/a5aa5ce300597224ec76dacc8e63ba3ad7a18bbd
Author:        Nadav Amit <namit@vmware.com>
AuthorDate:    Sat, 20 Feb 2021 15:17:12 -08:00
Committer:     Ingo Molnar <mingo@kernel.org>
CommitterDate: Sat, 06 Mar 2021 12:59:10 +01:00

smp: Inline on_each_cpu_cond() and on_each_cpu()

Simplify the code and avoid having an additional function on the stack
by inlining on_each_cpu_cond() and on_each_cpu().

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Nadav Amit <namit@vmware.com>
[ Minor edits. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210220231712.2475218-10-namit@vmware.com
---
 include/linux/smp.h | 50 ++++++++++++++++++++++++++++-----------
 kernel/smp.c        | 56 +--------------------------------------------
 kernel/up.c         | 38 +------------------------------
 3 files changed, 37 insertions(+), 107 deletions(-)
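
For context, a minimal sketch of how the two wrappers being inlined are
typically called (the callback, predicate and counter names below are
hypothetical; the on_each_cpu*() signatures are the ones visible in the
include/linux/smp.h hunk that follows):

#include <linux/smp.h>
#include <linux/atomic.h>

static atomic_t hits = ATOMIC_INIT(0);	/* hypothetical counter */

/* Must be fast and non-blocking: may run from IPI context on other CPUs. */
static void bump(void *info)
{
	atomic_inc(&hits);
}

/* Predicate for on_each_cpu_cond(): run only on even-numbered CPUs. */
static bool is_even_cpu(int cpu, void *info)
{
	return (cpu & 1) == 0;
}

static void example(void)
{
	on_each_cpu(bump, NULL, 1);			/* all online CPUs, wait */
	on_each_cpu_cond(is_even_cpu, bump, NULL, true);
}

After this patch, both calls funnel into on_each_cpu_cond_mask() with a
NULL predicate or cpu_online_mask filled in by the new inline wrappers,
instead of going through separate exported out-of-line functions.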

diff --git a/include/linux/smp.h b/include/linux/smp.h
index 70c6f62..84a0b48 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -50,30 +50,52 @@ extern unsigned int total_cpus;
 int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
 			     int wait);
 
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, const struct cpumask *mask);
+
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+
 /*
  * Call a function on all processors
  */
-void on_each_cpu(smp_call_func_t func, void *info, int wait);
+static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+	on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
+}
 
-/*
- * Call a function on processors specified by mask, which might include
- * the local one.
+/**
+ * on_each_cpu_mask(): Run a function on processors specified by
+ * cpumask, which may include the local processor.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ *        on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.  The
+ * exception is that it may be used during early boot while
+ * early_boot_irqs_disabled is set.
  */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
-		void *info, bool wait);
+static inline void on_each_cpu_mask(const struct cpumask *mask,
+				    smp_call_func_t func, void *info, bool wait)
+{
+	on_each_cpu_cond_mask(NULL, func, info, wait, mask);
+}
 
 /*
  * Call a function on each processor for which the supplied function
  * cond_func returns a positive value. This may include the local
- * processor.
+ * processor.  May be used during early boot while early_boot_irqs_disabled is
+ * set. Use local_irq_save/restore() instead of local_irq_disable/enable().
  */
-void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait);
-
-void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, const struct cpumask *mask);
-
-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
+				    smp_call_func_t func, void *info, bool wait)
+{
+	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
+}
 
 #ifdef CONFIG_SMP
 
diff --git a/kernel/smp.c b/kernel/smp.c
index c8a5a1f..b6375d7 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -848,55 +848,6 @@ void __init smp_init(void)
 }
 
 /*
- * Call a function on all processors.  May be used during early boot while
- * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
- * of local_irq_disable/enable().
- */
-void on_each_cpu(smp_call_func_t func, void *info, int wait)
-{
-	unsigned long flags;
-
-	preempt_disable();
-	smp_call_function(func, info, wait);
-	local_irq_save(flags);
-	func(info);
-	local_irq_restore(flags);
-	preempt_enable();
-}
-EXPORT_SYMBOL(on_each_cpu);
-
-/**
- * on_each_cpu_mask(): Run a function on processors specified by
- * cpumask, which may include the local processor.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- *        on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.  The
- * exception is that it may be used during early boot while
- * early_boot_irqs_disabled is set.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
-			void *info, bool wait)
-{
-	unsigned int scf_flags;
-
-	scf_flags = SCF_RUN_LOCAL;
-	if (wait)
-		scf_flags |= SCF_WAIT;
-
-	preempt_disable();
-	smp_call_function_many_cond(mask, func, info, scf_flags, NULL);
-	preempt_enable();
-}
-EXPORT_SYMBOL(on_each_cpu_mask);
-
-/*
  * on_each_cpu_cond(): Call a function on each processor for which
  * the supplied function cond_func returns true, optionally waiting
  * for all the required CPUs to finish. This may include the local
@@ -932,13 +883,6 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait)
-{
-	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
-}
-EXPORT_SYMBOL(on_each_cpu_cond);
-
 static void do_nothing(void *unused)
 {
 }
diff --git a/kernel/up.c b/kernel/up.c
index c6f323d..bf20b4a 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -36,35 +36,6 @@ int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 }
 EXPORT_SYMBOL(smp_call_function_single_async);
 
-void on_each_cpu(smp_call_func_t func, void *info, int wait)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	func(info);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(on_each_cpu);
-
-/*
- * Note we still need to test the mask even for UP
- * because we actually can get an empty mask from
- * code that on SMP might call us without the local
- * CPU in the mask.
- */
-void on_each_cpu_mask(const struct cpumask *mask,
-		      smp_call_func_t func, void *info, bool wait)
-{
-	unsigned long flags;
-
-	if (cpumask_test_cpu(0, mask)) {
-		local_irq_save(flags);
-		func(info);
-		local_irq_restore(flags);
-	}
-}
-EXPORT_SYMBOL(on_each_cpu_mask);
-
 /*
  * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
@@ -75,7 +46,7 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 	unsigned long flags;
 
 	preempt_disable();
-	if (cond_func(0, info)) {
+	if ((!cond_func || cond_func(0, info)) && cpumask_test_cpu(0, mask)) {
 		local_irq_save(flags);
 		func(info);
 		local_irq_restore(flags);
@@ -84,13 +55,6 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait)
-{
-	on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
-}
-EXPORT_SYMBOL(on_each_cpu_cond);
-
 int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
 {
 	int ret;
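
A note to make the kernel/up.c hunk above concrete: the UP version of
on_each_cpu_cond_mask() now treats a NULL cond_func as "always true" and
checks the cpumask, which is what lets the new generic inline wrappers
pass NULL and an arbitrary mask straight through. A small freestanding
model of that dispatch condition (plain C, all names hypothetical, not
kernel code):

#include <stdbool.h>
#include <stdio.h>

typedef bool (*cond_fn)(int cpu, void *info);
typedef void (*call_fn)(void *info);

/* Models the UP on_each_cpu_cond_mask() after this patch: the callback
 * runs only if the predicate is absent or true for CPU 0, and CPU 0 is
 * set in the mask. */
static void up_cond_mask_model(cond_fn cond, call_fn func, void *info,
			       bool cpu0_in_mask)
{
	if ((!cond || cond(0, info)) && cpu0_in_mask)
		func(info);
}

static void hello(void *info)
{
	puts("ran on CPU 0");
}

int main(void)
{
	up_cond_mask_model(NULL, hello, NULL, true);	/* runs */
	up_cond_mask_model(NULL, hello, NULL, false);	/* mask excludes CPU 0 */
	return 0;
}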

Thread overview: 33+ messages
2021-02-20 23:17 [PATCH v6 0/9] x86/tlb: Concurrent TLB flushes Nadav Amit
2021-02-20 23:17 ` Nadav Amit
2021-02-20 23:17 ` [PATCH v6 1/9] smp: Run functions concurrently in smp_call_function_many_cond() Nadav Amit
2021-03-01 17:10   ` Peter Zijlstra
2021-03-01 19:01     ` Nadav Amit
2021-03-02  7:05     ` [PATCH] smp: Micro-optimize smp_call_function_many_cond() Ingo Molnar
2021-03-02  9:54   ` [tip: x86/mm] smp: Run functions concurrently in smp_call_function_many_cond() tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 2/9] x86/mm/tlb: Unify flush_tlb_func_local() and flush_tlb_func_remote() Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 3/9] x86/mm/tlb: Open-code on_each_cpu_cond_mask() for tlb_is_not_lazy() Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently Nadav Amit
2021-02-20 23:17   ` Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 5/9] x86/mm/tlb: Privatize cpu_tlbstate Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 6/9] x86/mm/tlb: Do not make is_lazy dirty for no reason Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 7/9] cpumask: Mark functions as pure Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 8/9] x86/mm/tlb: Remove unnecessary uses of the inline keyword Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 9/9] smp: inline on_each_cpu_cond() and on_each_cpu() Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] smp: Inline " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit [this message]
