linux-kernel.vger.kernel.org archive mirror
* [PATCH 0/3] smp: Avoid memory allocation in on_each_cpu_cond()
@ 2020-01-17  9:01 Sebastian Andrzej Siewior
  2020-01-17  9:01 ` [PATCH 1/3] smp: Use smp_cond_func_t as type for the conditional function Sebastian Andrzej Siewior
                   ` (3 more replies)
  0 siblings, 4 replies; 12+ messages in thread
From: Sebastian Andrzej Siewior @ 2020-01-17  9:01 UTC (permalink / raw)
  To: linux-kernel; +Cc: Ingo Molnar, Thomas Gleixner, Peter Zijlstra

x86 is using on_each_cpu_cond_mask() in native_flush_tlb_others() in a
preempt disabled section. The memory allocation in
on_each_cpu_cond_mask() gives me a headache on RT.
This is an attempt to get rid of the memory allocation and potentially
accelerate the code path :)
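
For illustration only (a rough sketch, not part of any patch here), the call
pattern in question looks like this; the names are taken from the current
x86 code, the comments are mine:

	/* native_flush_tlb_others() runs with preemption disabled ... */
	on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
			      (void *)info, 1, GFP_ATOMIC, cpumask);
	/*
	 * ... but on_each_cpu_cond_mask() then tries to allocate a temporary
	 * cpumask, and even a GFP_ATOMIC allocation is a problem on RT.
	 */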

Sebastian




* [PATCH 1/3] smp: Use smp_cond_func_t as type for the conditional function
  2020-01-17  9:01 [PATCH 0/3] smp: Avoid memory allocation in on_each_cpu_cond() Sebastian Andrzej Siewior
@ 2020-01-17  9:01 ` Sebastian Andrzej Siewior
  2020-01-24 19:45   ` [tip: smp/core] " tip-bot2 for Sebastian Andrzej Siewior
  2020-01-17  9:01 ` [PATCH 2/3] smp: Add a smp_cond_func_t argument to smp_call_function_many() Sebastian Andrzej Siewior
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 12+ messages in thread
From: Sebastian Andrzej Siewior @ 2020-01-17  9:01 UTC (permalink / raw)
  To: linux-kernel
  Cc: Ingo Molnar, Thomas Gleixner, Peter Zijlstra, Sebastian Andrzej Siewior

Use a typedef for the conditional function instead of defining it each time in
the function prototype.
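
For reference, a minimal conditional function matching the new typedef could
look like this (illustrative only, not part of the patch):

	/* hypothetical smp_cond_func_t: only run on even-numbered CPUs */
	static bool cpu_is_even(int cpu, void *info)
	{
		return !(cpu & 1);
	}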

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/smp.h | 12 ++++++------
 kernel/smp.c        | 11 +++++------
 kernel/up.c         | 11 +++++------
 3 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/include/linux/smp.h b/include/linux/smp.h
index 6fc856c9eda58..4734416855aad 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -15,6 +15,7 @@
 #include <linux/llist.h>
 
 typedef void (*smp_call_func_t)(void *info);
+typedef bool (*smp_cond_func_t)(int cpu, void *info);
 struct __call_single_data {
 	struct llist_node llist;
 	smp_call_func_t func;
@@ -49,13 +50,12 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
  * cond_func returns a positive value. This may include the local
  * processor.
  */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		smp_call_func_t func, void *info, bool wait,
-		gfp_t gfp_flags);
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+		      void *info, bool wait, gfp_t gfp_flags);
 
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-		smp_call_func_t func, void *info, bool wait,
-		gfp_t gfp_flags, const struct cpumask *mask);
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, gfp_t gfp_flags,
+			   const struct cpumask *mask);
 
 int smp_call_function_single_async(int cpu, call_single_data_t *csd);
 
diff --git a/kernel/smp.c b/kernel/smp.c
index 7dbcb402c2fc0..c64044d68bc62 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -680,9 +680,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * You must not call this function with disabled interrupts or
  * from a hardware interrupt handler or from a bottom half handler.
  */
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-			smp_call_func_t func, void *info, bool wait,
-			gfp_t gfp_flags, const struct cpumask *mask)
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, gfp_t gfp_flags,
+			   const struct cpumask *mask)
 {
 	cpumask_var_t cpus;
 	int cpu, ret;
@@ -714,9 +714,8 @@ void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-			smp_call_func_t func, void *info, bool wait,
-			gfp_t gfp_flags)
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+		      void *info, bool wait, gfp_t gfp_flags)
 {
 	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
 				cpu_online_mask);
diff --git a/kernel/up.c b/kernel/up.c
index 862b460ab97a8..5c0d4f2bece22 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -68,9 +68,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * Preemption is disabled here to make sure the cond_func is called under the
  * same condtions in UP and SMP.
  */
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-			   smp_call_func_t func, void *info, bool wait,
-			   gfp_t gfp_flags, const struct cpumask *mask)
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, gfp_t gfp_flags,
+			   const struct cpumask *mask)
 {
 	unsigned long flags;
 
@@ -84,9 +84,8 @@ void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		      smp_call_func_t func, void *info, bool wait,
-		      gfp_t gfp_flags)
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+		      void *info, bool wait, gfp_t gfp_flags)
 {
 	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
 }
-- 
2.25.0



* [PATCH 2/3] smp: Add a smp_cond_func_t argument to smp_call_function_many()
  2020-01-17  9:01 [PATCH 0/3] smp: Avoid memory allocation in on_each_cpu_cond() Sebastian Andrzej Siewior
  2020-01-17  9:01 ` [PATCH 1/3] smp: Use smp_cond_func_t as type for the conditional function Sebastian Andrzej Siewior
@ 2020-01-17  9:01 ` Sebastian Andrzej Siewior
  2020-01-17 13:15   ` Peter Zijlstra
  2020-01-24 19:45   ` [tip: smp/core] " tip-bot2 for Sebastian Andrzej Siewior
  2020-01-17  9:01 ` [PATCH 3/3] smp: Remove allocation mask from on_each_cpu_cond.*() Sebastian Andrzej Siewior
  2020-01-17 15:00 ` [PATCH 0/3] smp: Avoid memory allocation in on_each_cpu_cond() Peter Zijlstra
  3 siblings, 2 replies; 12+ messages in thread
From: Sebastian Andrzej Siewior @ 2020-01-17  9:01 UTC (permalink / raw)
  To: linux-kernel
  Cc: Ingo Molnar, Thomas Gleixner, Peter Zijlstra, Sebastian Andrzej Siewior

on_each_cpu_cond_mask() allocates a new CPU mask. The newly allocated
mask is a subset of the provided mask based on the conditional function.
This memory allocation could be avoided by extending
smp_call_function_many() with the conditional function and performing the
remote function call based on the mask and the conditional function.

Rename smp_call_function_many() to smp_call_function_many_cond() and add
the smp_cond_func_t argument. If smp_cond_func_t is provided then it is
used before invoking the function.
Provide smp_call_function_many() with cond_func set to NULL.
Let on_each_cpu_cond_mask() use smp_call_function_many_cond().
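
As an illustrative sketch (not part of the patch; the callback, the per-CPU
flag and do_work() are made up), a caller can now have the filtering done
inside smp_call_function_many_cond() without any intermediate cpumask:

	static DEFINE_PER_CPU(bool, needs_work);

	/* hypothetical conditional function, evaluated once per CPU */
	static bool cpu_needs_work(int cpu, void *info)
	{
		return per_cpu(needs_work, cpu);
	}

	/* do_work() is a hypothetical smp_call_func_t */
	on_each_cpu_cond_mask(cpu_needs_work, do_work, NULL, true,
			      GFP_ATOMIC, cpu_online_mask);

The gfp argument is still part of the prototype at this point; it only goes
away in the next patch.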

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/smp.c | 77 ++++++++++++++++++++++++----------------------------
 1 file changed, 36 insertions(+), 41 deletions(-)

diff --git a/kernel/smp.c b/kernel/smp.c
index c64044d68bc62..e17e6344ab54d 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -395,22 +395,9 @@ int smp_call_function_any(const struct cpumask *mask,
 }
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
-/**
- * smp_call_function_many(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- *        on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler. Preemption
- * must be disabled when calling this function.
- */
-void smp_call_function_many(const struct cpumask *mask,
-			    smp_call_func_t func, void *info, bool wait)
+static void smp_call_function_many_cond(const struct cpumask *mask,
+					smp_call_func_t func, void *info,
+					bool wait, smp_cond_func_t cond_func)
 {
 	struct call_function_data *cfd;
 	int cpu, next_cpu, this_cpu = smp_processor_id();
@@ -448,7 +435,8 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	/* Fastpath: do that cpu by itself. */
 	if (next_cpu >= nr_cpu_ids) {
-		smp_call_function_single(cpu, func, info, wait);
+		if (!cond_func || (cond_func && cond_func(cpu, info)))
+			smp_call_function_single(cpu, func, info, wait);
 		return;
 	}
 
@@ -465,6 +453,9 @@ void smp_call_function_many(const struct cpumask *mask,
 	for_each_cpu(cpu, cfd->cpumask) {
 		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
 
+		if (cond_func && !cond_func(cpu, info))
+			continue;
+
 		csd_lock(csd);
 		if (wait)
 			csd->flags |= CSD_FLAG_SYNCHRONOUS;
@@ -486,6 +477,26 @@ void smp_call_function_many(const struct cpumask *mask,
 		}
 	}
 }
+
+/**
+ * smp_call_function_many(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ *        on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. Preemption
+ * must be disabled when calling this function.
+ */
+void smp_call_function_many(const struct cpumask *mask,
+			    smp_call_func_t func, void *info, bool wait)
+{
+	smp_call_function_many_cond(mask, func, info, wait, NULL);
+}
 EXPORT_SYMBOL(smp_call_function_many);
 
 /**
@@ -684,33 +695,17 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 			   void *info, bool wait, gfp_t gfp_flags,
 			   const struct cpumask *mask)
 {
-	cpumask_var_t cpus;
-	int cpu, ret;
+	int cpu = get_cpu();
 
-	might_sleep_if(gfpflags_allow_blocking(gfp_flags));
+	smp_call_function_many_cond(mask, func, info, wait, cond_func);
+	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
+		unsigned long flags;
 
-	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
-		preempt_disable();
-		for_each_cpu(cpu, mask)
-			if (cond_func(cpu, info))
-				__cpumask_set_cpu(cpu, cpus);
-		on_each_cpu_mask(cpus, func, info, wait);
-		preempt_enable();
-		free_cpumask_var(cpus);
-	} else {
-		/*
-		 * No free cpumask, bother. No matter, we'll
-		 * just have to IPI them one by one.
-		 */
-		preempt_disable();
-		for_each_cpu(cpu, mask)
-			if (cond_func(cpu, info)) {
-				ret = smp_call_function_single(cpu, func,
-								info, wait);
-				WARN_ON_ONCE(ret);
-			}
-		preempt_enable();
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
 	}
+	put_cpu();
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-- 
2.25.0



* [PATCH 3/3] smp: Remove allocation mask from on_each_cpu_cond.*()
  2020-01-17  9:01 [PATCH 0/3] smp: Avoid memory allocation in on_each_cpu_cond() Sebastian Andrzej Siewior
  2020-01-17  9:01 ` [PATCH 1/3] smp: Use smp_cond_func_t as type for the conditional function Sebastian Andrzej Siewior
  2020-01-17  9:01 ` [PATCH 2/3] smp: Add a smp_cond_func_t argument to smp_call_function_many() Sebastian Andrzej Siewior
@ 2020-01-17  9:01 ` Sebastian Andrzej Siewior
  2020-01-24 19:45   ` [tip: smp/core] " tip-bot2 for Sebastian Andrzej Siewior
  2020-01-17 15:00 ` [PATCH 0/3] smp: Avoid memory allocation in on_each_cpu_cond() Peter Zijlstra
  3 siblings, 1 reply; 12+ messages in thread
From: Sebastian Andrzej Siewior @ 2020-01-17  9:01 UTC (permalink / raw)
  To: linux-kernel
  Cc: Ingo Molnar, Thomas Gleixner, Peter Zijlstra, Sebastian Andrzej Siewior

The allocation mask is no longer used by on_each_cpu_cond() and
on_each_cpu_cond_mask() and can be removed.
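
For illustration, the change at the call sites is purely mechanical, e.g.
(taken from the fs/buffer.c hunk below):

	/* before */
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
	/* after */
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);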

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 arch/x86/mm/tlb.c   |  2 +-
 fs/buffer.c         |  2 +-
 include/linux/smp.h |  5 ++---
 kernel/smp.c        | 13 +++----------
 kernel/up.c         |  7 +++----
 mm/slub.c           |  2 +-
 6 files changed, 11 insertions(+), 20 deletions(-)

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index e6a9edc5baaf0..66f96f21a7b60 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -708,7 +708,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 			       (void *)info, 1);
 	else
 		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
-				(void *)info, 1, GFP_ATOMIC, cpumask);
+				(void *)info, 1, cpumask);
 }
 
 /*
diff --git a/fs/buffer.c b/fs/buffer.c
index 18a87ec8a465b..b8d28370cfd7f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1433,7 +1433,7 @@ static bool has_bh_in_lru(int cpu, void *dummy)
 
 void invalidate_bh_lrus(void)
 {
-	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
+	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 4734416855aad..cbc9162689d0f 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -51,11 +51,10 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
  * processor.
  */
 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait, gfp_t gfp_flags);
+		      void *info, bool wait);
 
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, gfp_t gfp_flags,
-			   const struct cpumask *mask);
+			   void *info, bool wait, const struct cpumask *mask);
 
 int smp_call_function_single_async(int cpu, call_single_data_t *csd);
 
diff --git a/kernel/smp.c b/kernel/smp.c
index e17e6344ab54d..3b7bedc97af38 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -679,11 +679,6 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * @info:	An arbitrary pointer to pass to both functions.
  * @wait:	If true, wait (atomically) until function has
  *		completed on other CPUs.
- * @gfp_flags:	GFP flags to use when allocating the cpumask
- *		used internally by the function.
- *
- * The function might sleep if the GFP flags indicates a non
- * atomic allocation is allowed.
  *
  * Preemption is disabled to protect against CPUs going offline but not online.
  * CPUs going online during the call will not be seen or sent an IPI.
@@ -692,8 +687,7 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * from a hardware interrupt handler or from a bottom half handler.
  */
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, gfp_t gfp_flags,
-			   const struct cpumask *mask)
+			   void *info, bool wait, const struct cpumask *mask)
 {
 	int cpu = get_cpu();
 
@@ -710,10 +704,9 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait, gfp_t gfp_flags)
+		      void *info, bool wait)
 {
-	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
-				cpu_online_mask);
+	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
 
diff --git a/kernel/up.c b/kernel/up.c
index 5c0d4f2bece22..53144d0562522 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -69,8 +69,7 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * same condtions in UP and SMP.
  */
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, gfp_t gfp_flags,
-			   const struct cpumask *mask)
+			   void *info, bool wait, const struct cpumask *mask)
 {
 	unsigned long flags;
 
@@ -85,9 +84,9 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait, gfp_t gfp_flags)
+		      void *info, bool wait)
 {
-	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
+	on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
 
diff --git a/mm/slub.c b/mm/slub.c
index d11389710b12d..e77f19db41f16 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2341,7 +2341,7 @@ static bool has_cpu_slab(int cpu, void *info)
 
 static void flush_all(struct kmem_cache *s)
 {
-	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
 }
 
 /*
-- 
2.25.0



* Re: [PATCH 2/3] smp: Add a smp_cond_func_t argument to smp_call_function_many()
  2020-01-17  9:01 ` [PATCH 2/3] smp: Add a smp_cond_func_t argument to smp_call_function_many() Sebastian Andrzej Siewior
@ 2020-01-17 13:15   ` Peter Zijlstra
  2020-01-17 14:41     ` [PATCH 2/3 v2] " Sebastian Andrzej Siewior
  2020-01-24 19:45   ` [tip: smp/core] " tip-bot2 for Sebastian Andrzej Siewior
  1 sibling, 1 reply; 12+ messages in thread
From: Peter Zijlstra @ 2020-01-17 13:15 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior
  Cc: linux-kernel, Ingo Molnar, Thomas Gleixner, Rik van Riel

On Fri, Jan 17, 2020 at 10:01:36AM +0100, Sebastian Andrzej Siewior wrote:

> @@ -448,7 +435,8 @@ void smp_call_function_many(const struct cpumask *mask,
>  
>  	/* Fastpath: do that cpu by itself. */
>  	if (next_cpu >= nr_cpu_ids) {
> +		if (!cond_func || (cond_func && cond_func(cpu, info)))
> +			smp_call_function_single(cpu, func, info, wait);

Can't we write that like:

		if (!cond_func || cond_func(cpu, info))

>  		return;
>  	}
>  
> @@ -465,6 +453,9 @@ void smp_call_function_many(const struct cpumask *mask,
>  	for_each_cpu(cpu, cfd->cpumask) {
>  		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
>  
> +		if (cond_func && !cond_func(cpu, info))
> +			continue;
> +
>  		csd_lock(csd);
>  		if (wait)
>  			csd->flags |= CSD_FLAG_SYNCHRONOUS;
> @@ -486,6 +477,26 @@ void smp_call_function_many(const struct cpumask *mask,
>  		}
>  	}
>  }
> +
> +/**
> + * smp_call_function_many(): Run a function on a set of other CPUs.
> + * @mask: The set of cpus to run on (only runs on online subset).
> + * @func: The function to run. This must be fast and non-blocking.
> + * @info: An arbitrary pointer to pass to the function.
> + * @wait: If true, wait (atomically) until function has completed
> + *        on other CPUs.
> + *
> + * If @wait is true, then returns once @func has returned.
> + *
> + * You must not call this function with disabled interrupts or from a
> + * hardware interrupt handler or from a bottom half handler. Preemption
> + * must be disabled when calling this function.
> + */
> +void smp_call_function_many(const struct cpumask *mask,
> +			    smp_call_func_t func, void *info, bool wait)
> +{
> +	smp_call_function_many_cond(mask, func, info, wait, NULL);
> +}
>  EXPORT_SYMBOL(smp_call_function_many);
>  
>  /**
> @@ -684,33 +695,17 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
>  			   void *info, bool wait, gfp_t gfp_flags,
>  			   const struct cpumask *mask)
>  {
> +	int cpu = get_cpu();
>  
> +	smp_call_function_many_cond(mask, func, info, wait, cond_func);
> +	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
> +		unsigned long flags;
>  
> +		local_irq_save(flags);
> +		func(info);
> +		local_irq_restore(flags);
>  	}
> +	put_cpu();
>  }
>  EXPORT_SYMBOL(on_each_cpu_cond_mask);

But yes, over-all this seems like a very nice cleanup.


* [PATCH 2/3 v2] smp: Add a smp_cond_func_t argument to smp_call_function_many()
  2020-01-17 13:15   ` Peter Zijlstra
@ 2020-01-17 14:41     ` Sebastian Andrzej Siewior
  0 siblings, 0 replies; 12+ messages in thread
From: Sebastian Andrzej Siewior @ 2020-01-17 14:41 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: linux-kernel, Ingo Molnar, Thomas Gleixner, Rik van Riel

on_each_cpu_cond_mask() allocates a new CPU mask. The newly allocated
mask is a subset of the provided mask based on the conditional function.
This memory allocation could be avoided by extending
smp_call_function_many() with the conditional function and performing the
remote function call based on the mask and the conditional function.

Rename smp_call_function_many() to smp_call_function_many_cond() and add
the smp_cond_func_t argument. If smp_cond_func_t is provided then it is
used before invoking the function.
Provide smp_call_function_many() with cond_func set to NULL.
Let on_each_cpu_cond_mask() use smp_call_function_many_cond().

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
v1…v2: Use "if (!cond_func || cond_func(cpu, info))" instead of
       "if (!cond_func || (cond_func && cond_func(cpu, info)))" as
       suggested by Peter

 kernel/smp.c | 77 ++++++++++++++++++++++++----------------------------
 1 file changed, 36 insertions(+), 41 deletions(-)

diff --git a/kernel/smp.c b/kernel/smp.c
index c64044d68bc62..bf374d9533f83 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -395,22 +395,9 @@ int smp_call_function_any(const struct cpumask *mask,
 }
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
-/**
- * smp_call_function_many(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- *        on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler. Preemption
- * must be disabled when calling this function.
- */
-void smp_call_function_many(const struct cpumask *mask,
-			    smp_call_func_t func, void *info, bool wait)
+static void smp_call_function_many_cond(const struct cpumask *mask,
+					smp_call_func_t func, void *info,
+					bool wait, smp_cond_func_t cond_func)
 {
 	struct call_function_data *cfd;
 	int cpu, next_cpu, this_cpu = smp_processor_id();
@@ -448,7 +435,8 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	/* Fastpath: do that cpu by itself. */
 	if (next_cpu >= nr_cpu_ids) {
-		smp_call_function_single(cpu, func, info, wait);
+		if (!cond_func || cond_func(cpu, info))
+			smp_call_function_single(cpu, func, info, wait);
 		return;
 	}
 
@@ -465,6 +453,9 @@ void smp_call_function_many(const struct cpumask *mask,
 	for_each_cpu(cpu, cfd->cpumask) {
 		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
 
+		if (cond_func && !cond_func(cpu, info))
+			continue;
+
 		csd_lock(csd);
 		if (wait)
 			csd->flags |= CSD_FLAG_SYNCHRONOUS;
@@ -486,6 +477,26 @@ void smp_call_function_many(const struct cpumask *mask,
 		}
 	}
 }
+
+/**
+ * smp_call_function_many(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ *        on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. Preemption
+ * must be disabled when calling this function.
+ */
+void smp_call_function_many(const struct cpumask *mask,
+			    smp_call_func_t func, void *info, bool wait)
+{
+	smp_call_function_many_cond(mask, func, info, wait, NULL);
+}
 EXPORT_SYMBOL(smp_call_function_many);
 
 /**
@@ -684,33 +695,17 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 			   void *info, bool wait, gfp_t gfp_flags,
 			   const struct cpumask *mask)
 {
-	cpumask_var_t cpus;
-	int cpu, ret;
+	int cpu = get_cpu();
 
-	might_sleep_if(gfpflags_allow_blocking(gfp_flags));
+	smp_call_function_many_cond(mask, func, info, wait, cond_func);
+	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
+		unsigned long flags;
 
-	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
-		preempt_disable();
-		for_each_cpu(cpu, mask)
-			if (cond_func(cpu, info))
-				__cpumask_set_cpu(cpu, cpus);
-		on_each_cpu_mask(cpus, func, info, wait);
-		preempt_enable();
-		free_cpumask_var(cpus);
-	} else {
-		/*
-		 * No free cpumask, bother. No matter, we'll
-		 * just have to IPI them one by one.
-		 */
-		preempt_disable();
-		for_each_cpu(cpu, mask)
-			if (cond_func(cpu, info)) {
-				ret = smp_call_function_single(cpu, func,
-								info, wait);
-				WARN_ON_ONCE(ret);
-			}
-		preempt_enable();
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
 	}
+	put_cpu();
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-- 
2.25.0


* Re: [PATCH 0/3] smp: Avoid memory allocation in on_each_cpu_cond()
  2020-01-17  9:01 [PATCH 0/3] smp: Avoid memory allocation in on_each_cpu_cond() Sebastian Andrzej Siewior
                   ` (2 preceding siblings ...)
  2020-01-17  9:01 ` [PATCH 3/3] smp: Remove allocation mask from on_each_cpu_cond.*() Sebastian Andrzej Siewior
@ 2020-01-17 15:00 ` Peter Zijlstra
  3 siblings, 0 replies; 12+ messages in thread
From: Peter Zijlstra @ 2020-01-17 15:00 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior; +Cc: linux-kernel, Ingo Molnar, Thomas Gleixner

On Fri, Jan 17, 2020 at 10:01:34AM +0100, Sebastian Andrzej Siewior wrote:
> x86 is using on_each_cpu_cond_mask() in native_flush_tlb_others() in a
> preempt disabled section. The memory allocation in
> on_each_cpu_cond_mask() gives me a headache on RT.
> This is an attempt to get rid of the memory allocation and potentially
> accelerate the code path :)

Looks good to me,

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>




* [tip: smp/core] smp: Remove allocation mask from on_each_cpu_cond.*()
  2020-01-17  9:01 ` [PATCH 3/3] smp: Remove allocation mask from on_each_cpu_cond.*() Sebastian Andrzej Siewior
@ 2020-01-24 19:45   ` tip-bot2 for Sebastian Andrzej Siewior
  0 siblings, 0 replies; 12+ messages in thread
From: tip-bot2 for Sebastian Andrzej Siewior @ 2020-01-24 19:45 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Sebastian Andrzej Siewior, Thomas Gleixner,
	Peter Zijlstra (Intel),
	x86, LKML

The following commit has been merged into the smp/core branch of tip:

Commit-ID:     cb923159bbb8cc8fe09c19a3435ee11fd546f3d3
Gitweb:        https://git.kernel.org/tip/cb923159bbb8cc8fe09c19a3435ee11fd546f3d3
Author:        Sebastian Andrzej Siewior <bigeasy@linutronix.de>
AuthorDate:    Fri, 17 Jan 2020 10:01:37 +01:00
Committer:     Thomas Gleixner <tglx@linutronix.de>
CommitterDate: Fri, 24 Jan 2020 20:40:09 +01:00

smp: Remove allocation mask from on_each_cpu_cond.*()

The allocation mask is no longer used by on_each_cpu_cond() and
on_each_cpu_cond_mask() and can be removed.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20200117090137.1205765-4-bigeasy@linutronix.de

---
 arch/x86/mm/tlb.c   |  2 +-
 fs/buffer.c         |  2 +-
 include/linux/smp.h |  5 ++---
 kernel/smp.c        | 13 +++----------
 kernel/up.c         |  7 +++----
 mm/slub.c           |  2 +-
 6 files changed, 11 insertions(+), 20 deletions(-)

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index e6a9edc..66f96f2 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -708,7 +708,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 			       (void *)info, 1);
 	else
 		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
-				(void *)info, 1, GFP_ATOMIC, cpumask);
+				(void *)info, 1, cpumask);
 }
 
 /*
diff --git a/fs/buffer.c b/fs/buffer.c
index 18a87ec..b8d2837 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1433,7 +1433,7 @@ static bool has_bh_in_lru(int cpu, void *dummy)
 
 void invalidate_bh_lrus(void)
 {
-	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
+	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 4734416..cbc9162 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -51,11 +51,10 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
  * processor.
  */
 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait, gfp_t gfp_flags);
+		      void *info, bool wait);
 
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, gfp_t gfp_flags,
-			   const struct cpumask *mask);
+			   void *info, bool wait, const struct cpumask *mask);
 
 int smp_call_function_single_async(int cpu, call_single_data_t *csd);
 
diff --git a/kernel/smp.c b/kernel/smp.c
index e17e634..3b7bedc 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -679,11 +679,6 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * @info:	An arbitrary pointer to pass to both functions.
  * @wait:	If true, wait (atomically) until function has
  *		completed on other CPUs.
- * @gfp_flags:	GFP flags to use when allocating the cpumask
- *		used internally by the function.
- *
- * The function might sleep if the GFP flags indicates a non
- * atomic allocation is allowed.
  *
  * Preemption is disabled to protect against CPUs going offline but not online.
  * CPUs going online during the call will not be seen or sent an IPI.
@@ -692,8 +687,7 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * from a hardware interrupt handler or from a bottom half handler.
  */
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, gfp_t gfp_flags,
-			   const struct cpumask *mask)
+			   void *info, bool wait, const struct cpumask *mask)
 {
 	int cpu = get_cpu();
 
@@ -710,10 +704,9 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait, gfp_t gfp_flags)
+		      void *info, bool wait)
 {
-	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
-				cpu_online_mask);
+	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
 
diff --git a/kernel/up.c b/kernel/up.c
index 5c0d4f2..53144d0 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -69,8 +69,7 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * same condtions in UP and SMP.
  */
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, gfp_t gfp_flags,
-			   const struct cpumask *mask)
+			   void *info, bool wait, const struct cpumask *mask)
 {
 	unsigned long flags;
 
@@ -85,9 +84,9 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait, gfp_t gfp_flags)
+		      void *info, bool wait)
 {
-	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
+	on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
 
diff --git a/mm/slub.c b/mm/slub.c
index 8eafccf..2e1a577 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2341,7 +2341,7 @@ static bool has_cpu_slab(int cpu, void *info)
 
 static void flush_all(struct kmem_cache *s)
 {
-	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
 }
 
 /*


* [tip: smp/core] smp: Use smp_cond_func_t as type for the conditional function
  2020-01-17  9:01 ` [PATCH 1/3] smp: Use smp_cond_func_t as type for the conditional function Sebastian Andrzej Siewior
@ 2020-01-24 19:45   ` tip-bot2 for Sebastian Andrzej Siewior
  0 siblings, 0 replies; 12+ messages in thread
From: tip-bot2 for Sebastian Andrzej Siewior @ 2020-01-24 19:45 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Sebastian Andrzej Siewior, Thomas Gleixner,
	Peter Zijlstra (Intel),
	x86, LKML

The following commit has been merged into the smp/core branch of tip:

Commit-ID:     5671d814dbd204b4ecc705045b5f1a647bff6f3b
Gitweb:        https://git.kernel.org/tip/5671d814dbd204b4ecc705045b5f1a647bff6f3b
Author:        Sebastian Andrzej Siewior <bigeasy@linutronix.de>
AuthorDate:    Fri, 17 Jan 2020 10:01:35 +01:00
Committer:     Thomas Gleixner <tglx@linutronix.de>
CommitterDate: Fri, 24 Jan 2020 20:40:08 +01:00

smp: Use smp_cond_func_t as type for the conditional function

Use a typedef for the conditional function instead of defining it each time in
the function prototype.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20200117090137.1205765-2-bigeasy@linutronix.de

---
 include/linux/smp.h | 12 ++++++------
 kernel/smp.c        | 11 +++++------
 kernel/up.c         | 11 +++++------
 3 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/include/linux/smp.h b/include/linux/smp.h
index 6fc856c..4734416 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -15,6 +15,7 @@
 #include <linux/llist.h>
 
 typedef void (*smp_call_func_t)(void *info);
+typedef bool (*smp_cond_func_t)(int cpu, void *info);
 struct __call_single_data {
 	struct llist_node llist;
 	smp_call_func_t func;
@@ -49,13 +50,12 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
  * cond_func returns a positive value. This may include the local
  * processor.
  */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		smp_call_func_t func, void *info, bool wait,
-		gfp_t gfp_flags);
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+		      void *info, bool wait, gfp_t gfp_flags);
 
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-		smp_call_func_t func, void *info, bool wait,
-		gfp_t gfp_flags, const struct cpumask *mask);
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, gfp_t gfp_flags,
+			   const struct cpumask *mask);
 
 int smp_call_function_single_async(int cpu, call_single_data_t *csd);
 
diff --git a/kernel/smp.c b/kernel/smp.c
index 7dbcb40..c64044d 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -680,9 +680,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * You must not call this function with disabled interrupts or
  * from a hardware interrupt handler or from a bottom half handler.
  */
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-			smp_call_func_t func, void *info, bool wait,
-			gfp_t gfp_flags, const struct cpumask *mask)
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, gfp_t gfp_flags,
+			   const struct cpumask *mask)
 {
 	cpumask_var_t cpus;
 	int cpu, ret;
@@ -714,9 +714,8 @@ void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-			smp_call_func_t func, void *info, bool wait,
-			gfp_t gfp_flags)
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+		      void *info, bool wait, gfp_t gfp_flags)
 {
 	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
 				cpu_online_mask);
diff --git a/kernel/up.c b/kernel/up.c
index 862b460..5c0d4f2 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -68,9 +68,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * Preemption is disabled here to make sure the cond_func is called under the
  * same condtions in UP and SMP.
  */
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-			   smp_call_func_t func, void *info, bool wait,
-			   gfp_t gfp_flags, const struct cpumask *mask)
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, gfp_t gfp_flags,
+			   const struct cpumask *mask)
 {
 	unsigned long flags;
 
@@ -84,9 +84,8 @@ void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		      smp_call_func_t func, void *info, bool wait,
-		      gfp_t gfp_flags)
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+		      void *info, bool wait, gfp_t gfp_flags)
 {
 	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
 }


* [tip: smp/core] smp: Add a smp_cond_func_t argument to smp_call_function_many()
  2020-01-17  9:01 ` [PATCH 2/3] smp: Add a smp_cond_func_t argument to smp_call_function_many() Sebastian Andrzej Siewior
  2020-01-17 13:15   ` Peter Zijlstra
@ 2020-01-24 19:45   ` tip-bot2 for Sebastian Andrzej Siewior
  2020-01-27  8:39     ` [PATCH] smp: Remove superfluous cond_func check in smp_call_function_many_cond() Sebastian Andrzej Siewior
  1 sibling, 1 reply; 12+ messages in thread
From: tip-bot2 for Sebastian Andrzej Siewior @ 2020-01-24 19:45 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Sebastian Andrzej Siewior, Thomas Gleixner,
	Peter Zijlstra (Intel),
	x86, LKML

The following commit has been merged into the smp/core branch of tip:

Commit-ID:     67719ef25eeb2048b11befa6a757aeb3848b5df1
Gitweb:        https://git.kernel.org/tip/67719ef25eeb2048b11befa6a757aeb3848b5df1
Author:        Sebastian Andrzej Siewior <bigeasy@linutronix.de>
AuthorDate:    Fri, 17 Jan 2020 10:01:36 +01:00
Committer:     Thomas Gleixner <tglx@linutronix.de>
CommitterDate: Fri, 24 Jan 2020 20:40:09 +01:00

smp: Add a smp_cond_func_t argument to smp_call_function_many()

on_each_cpu_cond_mask() allocates a new CPU mask. The newly allocated
mask is a subset of the provided mask based on the conditional function.

This memory allocation can be avoided by extending smp_call_function_many()
with the conditional function and performing the remote function call based
on the mask and the conditional function.

Rename smp_call_function_many() to smp_call_function_many_cond() and add
the smp_cond_func_t argument. If smp_cond_func_t is provided then it is
used before invoking the function.  Provide smp_call_function_many() with
cond_func set to NULL.  Let on_each_cpu_cond_mask() use
smp_call_function_many_cond().

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20200117090137.1205765-3-bigeasy@linutronix.de

---
 kernel/smp.c | 81 +++++++++++++++++++++++----------------------------
 1 file changed, 38 insertions(+), 43 deletions(-)

diff --git a/kernel/smp.c b/kernel/smp.c
index c64044d..e17e634 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -395,22 +395,9 @@ call:
 }
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
-/**
- * smp_call_function_many(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- *        on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler. Preemption
- * must be disabled when calling this function.
- */
-void smp_call_function_many(const struct cpumask *mask,
-			    smp_call_func_t func, void *info, bool wait)
+static void smp_call_function_many_cond(const struct cpumask *mask,
+					smp_call_func_t func, void *info,
+					bool wait, smp_cond_func_t cond_func)
 {
 	struct call_function_data *cfd;
 	int cpu, next_cpu, this_cpu = smp_processor_id();
@@ -448,7 +435,8 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	/* Fastpath: do that cpu by itself. */
 	if (next_cpu >= nr_cpu_ids) {
-		smp_call_function_single(cpu, func, info, wait);
+		if (!cond_func || (cond_func && cond_func(cpu, info)))
+			smp_call_function_single(cpu, func, info, wait);
 		return;
 	}
 
@@ -465,6 +453,9 @@ void smp_call_function_many(const struct cpumask *mask,
 	for_each_cpu(cpu, cfd->cpumask) {
 		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
 
+		if (cond_func && !cond_func(cpu, info))
+			continue;
+
 		csd_lock(csd);
 		if (wait)
 			csd->flags |= CSD_FLAG_SYNCHRONOUS;
@@ -486,6 +477,26 @@ void smp_call_function_many(const struct cpumask *mask,
 		}
 	}
 }
+
+/**
+ * smp_call_function_many(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ *        on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. Preemption
+ * must be disabled when calling this function.
+ */
+void smp_call_function_many(const struct cpumask *mask,
+			    smp_call_func_t func, void *info, bool wait)
+{
+	smp_call_function_many_cond(mask, func, info, wait, NULL);
+}
 EXPORT_SYMBOL(smp_call_function_many);
 
 /**
@@ -684,33 +695,17 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 			   void *info, bool wait, gfp_t gfp_flags,
 			   const struct cpumask *mask)
 {
-	cpumask_var_t cpus;
-	int cpu, ret;
-
-	might_sleep_if(gfpflags_allow_blocking(gfp_flags));
-
-	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
-		preempt_disable();
-		for_each_cpu(cpu, mask)
-			if (cond_func(cpu, info))
-				__cpumask_set_cpu(cpu, cpus);
-		on_each_cpu_mask(cpus, func, info, wait);
-		preempt_enable();
-		free_cpumask_var(cpus);
-	} else {
-		/*
-		 * No free cpumask, bother. No matter, we'll
-		 * just have to IPI them one by one.
-		 */
-		preempt_disable();
-		for_each_cpu(cpu, mask)
-			if (cond_func(cpu, info)) {
-				ret = smp_call_function_single(cpu, func,
-								info, wait);
-				WARN_ON_ONCE(ret);
-			}
-		preempt_enable();
+	int cpu = get_cpu();
+
+	smp_call_function_many_cond(mask, func, info, wait, cond_func);
+	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
 	}
+	put_cpu();
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 


* [PATCH] smp: Remove superfluous cond_func check in smp_call_function_many_cond()
  2020-01-24 19:45   ` [tip: smp/core] " tip-bot2 for Sebastian Andrzej Siewior
@ 2020-01-27  8:39     ` Sebastian Andrzej Siewior
  2020-01-28 14:46       ` [tip: smp/urgent] " tip-bot2 for Sebastian Andrzej Siewior
  0 siblings, 1 reply; 12+ messages in thread
From: Sebastian Andrzej Siewior @ 2020-01-27  8:39 UTC (permalink / raw)
  To: linux-kernel
  Cc: linux-tip-commits, Thomas Gleixner, Peter Zijlstra (Intel), x86

It was requested to remove the superfluous cond_func check but the follow-up
patch was overlooked. Here is an incremental update.

Link: https://lore.kernel.org/lkml/20200117131510.GA14914@hirez.programming.kicks-ass.net/
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/smp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/smp.c b/kernel/smp.c
index 3b7bedc97af38..d0ada39eb4d41 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -435,7 +435,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 
 	/* Fastpath: do that cpu by itself. */
 	if (next_cpu >= nr_cpu_ids) {
-		if (!cond_func || (cond_func && cond_func(cpu, info)))
+		if (!cond_func || cond_func(cpu, info))
 			smp_call_function_single(cpu, func, info, wait);
 		return;
 	}
-- 
2.25.0



* [tip: smp/urgent] smp: Remove superfluous cond_func check in smp_call_function_many_cond()
  2020-01-27  8:39     ` [PATCH] smp: Remove superfluous cond_func check in smp_call_function_many_cond() Sebastian Andrzej Siewior
@ 2020-01-28 14:46       ` tip-bot2 for Sebastian Andrzej Siewior
  0 siblings, 0 replies; 12+ messages in thread
From: tip-bot2 for Sebastian Andrzej Siewior @ 2020-01-28 14:46 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: Sebastian Andrzej Siewior, Thomas Gleixner, x86, LKML

The following commit has been merged into the smp/urgent branch of tip:

Commit-ID:     25a3a15417cf4311f812f5a2b18c5fc2809f66d7
Gitweb:        https://git.kernel.org/tip/25a3a15417cf4311f812f5a2b18c5fc2809f66d7
Author:        Sebastian Andrzej Siewior <bigeasy@linutronix.de>
AuthorDate:    Mon, 27 Jan 2020 09:39:15 +01:00
Committer:     Thomas Gleixner <tglx@linutronix.de>
CommitterDate: Tue, 28 Jan 2020 15:43:00 +01:00

smp: Remove superfluous cond_func check in smp_call_function_many_cond()

It was requested to remove the superfluous cond_func check but the follow-up
patch was overlooked. Remove it now.

Fixes: 67719ef25eeb ("smp: Add a smp_cond_func_t argument to smp_call_function_many()")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20200127083915.434tdkztorkklpdu@linutronix.de

---
 kernel/smp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/smp.c b/kernel/smp.c
index 3b7bedc..d0ada39 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -435,7 +435,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 
 	/* Fastpath: do that cpu by itself. */
 	if (next_cpu >= nr_cpu_ids) {
-		if (!cond_func || (cond_func && cond_func(cpu, info)))
+		if (!cond_func || cond_func(cpu, info))
 			smp_call_function_single(cpu, func, info, wait);
 		return;
 	}


end of thread

Thread overview: 12+ messages
2020-01-17  9:01 [PATCH 0/3] smp: Avoid memory allocation in on_each_cpu_cond() Sebastian Andrzej Siewior
2020-01-17  9:01 ` [PATCH 1/3] smp: Use smp_cond_func_t as type for the conditional function Sebastian Andrzej Siewior
2020-01-24 19:45   ` [tip: smp/core] " tip-bot2 for Sebastian Andrzej Siewior
2020-01-17  9:01 ` [PATCH 2/3] smp: Add a smp_cond_func_t argument to smp_call_function_many() Sebastian Andrzej Siewior
2020-01-17 13:15   ` Peter Zijlstra
2020-01-17 14:41     ` [PATCH 2/3 v2] " Sebastian Andrzej Siewior
2020-01-24 19:45   ` [tip: smp/core] " tip-bot2 for Sebastian Andrzej Siewior
2020-01-27  8:39     ` [PATCH] smp: Remove superfluous cond_func check in smp_call_function_many_cond() Sebastian Andrzej Siewior
2020-01-28 14:46       ` [tip: smp/urgent] " tip-bot2 for Sebastian Andrzej Siewior
2020-01-17  9:01 ` [PATCH 3/3] smp: Remove allocation mask from on_each_cpu_cond.*() Sebastian Andrzej Siewior
2020-01-24 19:45   ` [tip: smp/core] " tip-bot2 for Sebastian Andrzej Siewior
2020-01-17 15:00 ` [PATCH 0/3] smp: Avoid memory allocation in on_each_cpu_cond() Peter Zijlstra
