From: Nadav Amit <nadav.amit@gmail.com>
To: linux-kernel@vger.kernel.org
Cc: Peter Zijlstra <peterz@infradead.org>,
	Andy Lutomirski <luto@kernel.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Nadav Amit <namit@vmware.com>
Subject: [PATCH v6 9/9] smp: inline on_each_cpu_cond() and on_each_cpu()
Date: Sat, 20 Feb 2021 15:17:12 -0800
Message-ID: <20210220231712.2475218-10-namit@vmware.com>
In-Reply-To: <20210220231712.2475218-1-namit@vmware.com>

From: Nadav Amit <namit@vmware.com>

Simplify the code and avoid the extra stack frame of an additional
function call by inlining on_each_cpu_cond() and on_each_cpu().
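
For illustration, a minimal usage sketch of the plain wrapper (the
bump_counter()/hits names are hypothetical); after this patch the
wrapper expands inline to on_each_cpu_cond_mask() with no extra call
frame:

  #include <linux/smp.h>
  #include <linux/atomic.h>

  /* Hypothetical example: bump a counter on every online CPU. */
  static void bump_counter(void *info)
  {
          atomic_t *counter = info;

          /* Runs with IRQs disabled on each CPU; must be fast and
           * non-blocking. */
          atomic_inc(counter);
  }

  static atomic_t hits = ATOMIC_INIT(0);

  void poke_all_cpus(void)
  {
          /* wait=1: return only after bump_counter() ran everywhere. */
          on_each_cpu(bump_counter, &hits, 1);
  }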

Cc: Andy Lutomirski <luto@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Nadav Amit <namit@vmware.com>
---
 include/linux/smp.h | 50 ++++++++++++++++++++++++++++------------
 kernel/smp.c        | 56 ---------------------------------------------
 2 files changed, 36 insertions(+), 70 deletions(-)

diff --git a/include/linux/smp.h b/include/linux/smp.h
index 70c6f6284dcf..84a0b4828f66 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -50,30 +50,52 @@ extern unsigned int total_cpus;
 int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
 			     int wait);
 
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, const struct cpumask *mask);
+
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+
 /*
  * Call a function on all processors
  */
-void on_each_cpu(smp_call_func_t func, void *info, int wait);
+static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+	on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
+}
 
-/*
- * Call a function on processors specified by mask, which might include
- * the local one.
+/**
+ * on_each_cpu_mask(): Run a function on processors specified by
+ * cpumask, which may include the local processor.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ *        on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.  The
+ * exception is that it may be used during early boot while
+ * early_boot_irqs_disabled is set.
  */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
-		void *info, bool wait);
+static inline void on_each_cpu_mask(const struct cpumask *mask,
+				    smp_call_func_t func, void *info, bool wait)
+{
+	on_each_cpu_cond_mask(NULL, func, info, wait, mask);
+}
 
 /*
  * Call a function on each processor for which the supplied function
  * cond_func returns a positive value. This may include the local
- * processor.
+ * processor.  May be used during early boot while early_boot_irqs_disabled is
+ * set. Use local_irq_save/restore() instead of local_irq_disable/enable().
  */
-void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait);
-
-void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, const struct cpumask *mask);
-
-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
+				    smp_call_func_t func, void *info, bool wait)
+{
+	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
+}
 
 #ifdef CONFIG_SMP
 
diff --git a/kernel/smp.c b/kernel/smp.c
index c8a5a1facc1a..b6375d775e93 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -847,55 +847,6 @@ void __init smp_init(void)
 	smp_cpus_done(setup_max_cpus);
 }
 
-/*
- * Call a function on all processors.  May be used during early boot while
- * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
- * of local_irq_disable/enable().
- */
-void on_each_cpu(smp_call_func_t func, void *info, int wait)
-{
-	unsigned long flags;
-
-	preempt_disable();
-	smp_call_function(func, info, wait);
-	local_irq_save(flags);
-	func(info);
-	local_irq_restore(flags);
-	preempt_enable();
-}
-EXPORT_SYMBOL(on_each_cpu);
-
-/**
- * on_each_cpu_mask(): Run a function on processors specified by
- * cpumask, which may include the local processor.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- *        on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.  The
- * exception is that it may be used during early boot while
- * early_boot_irqs_disabled is set.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
-			void *info, bool wait)
-{
-	unsigned int scf_flags;
-
-	scf_flags = SCF_RUN_LOCAL;
-	if (wait)
-		scf_flags |= SCF_WAIT;
-
-	preempt_disable();
-	smp_call_function_many_cond(mask, func, info, scf_flags, NULL);
-	preempt_enable();
-}
-EXPORT_SYMBOL(on_each_cpu_mask);
-
 /*
  * on_each_cpu_cond(): Call a function on each processor for which
  * the supplied function cond_func returns true, optionally waiting
@@ -932,13 +883,6 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait)
-{
-	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
-}
-EXPORT_SYMBOL(on_each_cpu_cond);
-
 static void do_nothing(void *unused)
 {
 }
-- 
2.25.1
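
As a companion to the on_each_cpu_cond() comment in the hunk above, a
minimal sketch of the conditional variant (the flush_pending flag and
helper names are hypothetical), assuming the smp_cond_func_t predicate
signature bool (*)(int cpu, void *info) declared in <linux/smp.h>:

  #include <linux/smp.h>
  #include <linux/percpu.h>

  static DEFINE_PER_CPU(bool, flush_pending);

  /* Predicate: select only CPUs that have work queued. */
  static bool cpu_needs_flush(int cpu, void *info)
  {
          return per_cpu(flush_pending, cpu);
  }

  /* Runs with IRQs disabled on each selected CPU; must be fast and
   * non-blocking. */
  static void do_flush(void *info)
  {
          this_cpu_write(flush_pending, false);
          /* ... actual per-CPU work ... */
  }

  void flush_where_needed(void)
  {
          /* After this patch the wrapper expands inline to
           * on_each_cpu_cond_mask(cpu_needs_flush, do_flush,
           *                       NULL, true, cpu_online_mask). */
          on_each_cpu_cond(cpu_needs_flush, do_flush, NULL, true);
  }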


Thread overview: 33+ messages
2021-02-20 23:17 [PATCH v6 0/9] x86/tlb: Concurrent TLB flushes Nadav Amit
2021-02-20 23:17 ` Nadav Amit
2021-02-20 23:17 ` [PATCH v6 1/9] smp: Run functions concurrently in smp_call_function_many_cond() Nadav Amit
2021-03-01 17:10   ` Peter Zijlstra
2021-03-01 19:01     ` Nadav Amit
2021-03-02  7:05     ` [PATCH] smp: Micro-optimize smp_call_function_many_cond() Ingo Molnar
2021-03-02  9:54   ` [tip: x86/mm] smp: Run functions concurrently in smp_call_function_many_cond() tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 2/9] x86/mm/tlb: Unify flush_tlb_func_local() and flush_tlb_func_remote() Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 3/9] x86/mm/tlb: Open-code on_each_cpu_cond_mask() for tlb_is_not_lazy() Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently Nadav Amit
2021-02-20 23:17   ` Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 5/9] x86/mm/tlb: Privatize cpu_tlbstate Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 6/9] x86/mm/tlb: Do not make is_lazy dirty for no reason Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 7/9] cpumask: Mark functions as pure Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` [PATCH v6 8/9] x86/mm/tlb: Remove unnecessary uses of the inline keyword Nadav Amit
2021-03-02  9:54   ` [tip: x86/mm] " tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
2021-02-20 23:17 ` Nadav Amit [this message]
2021-03-02  9:54   ` [tip: x86/mm] smp: Inline on_each_cpu_cond() and on_each_cpu() tip-bot2 for Nadav Amit
2021-03-06 12:12   ` tip-bot2 for Nadav Amit
