From: Donghai Qiao <dqiao@redhat.com>
To: akpm@linux-foundation.org, sfr@canb.auug.org.au, arnd@arndb.de,
peterz@infradead.org, heying24@huawei.com,
andriy.shevchenko@linux.intel.com, axboe@kernel.dk,
rdunlap@infradead.org, tglx@linutronix.de, gor@linux.ibm.com
Cc: donghai.w.qiao@gmail.com, linux-kernel@vger.kernel.org,
Donghai Qiao <dqiao@redhat.com>
Subject: [PATCH v2 07/11] smp: change smp_call_function_any() to smp_call_any()
Date: Fri, 22 Apr 2022 16:00:36 -0400 [thread overview]
Message-ID: <20220422200040.93813-8-dqiao@redhat.com> (raw)
In-Reply-To: <20220422200040.93813-1-dqiao@redhat.com>
Rename smp_call_function_any() to smp_call_any() and make the
necessary changes.
Replace all invocations of smp_call_function_any() with
smp_call_any().
In principle, kernel consumers could use smp_call() wherever they
previously used smp_call_function_any(), with the extra CPU-selection
logic moved out of the interface and into the callers themselves.
However, quite a few cross-call consumers need to run their functions
on just one CPU of a given CPU set, so there is a real advantage to
adding smp_call_any() to the interface.
Signed-off-by: Donghai Qiao <dqiao@redhat.com>
---
v1 -> v2: removed 'x' from the function names and change XCALL to SMP_CALL from the new macros
arch/arm/kernel/perf_event_v7.c | 6 +-
arch/arm64/kernel/perf_event.c | 6 +-
arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 2 +-
drivers/cpufreq/acpi-cpufreq.c | 4 +-
drivers/cpufreq/powernv-cpufreq.c | 12 ++--
drivers/perf/arm_spe_pmu.c | 2 +-
include/linux/smp.h | 12 +---
kernel/smp.c | 78 ++++++++++-------------
8 files changed, 53 insertions(+), 69 deletions(-)
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index eb2190477da1..ae008cba28c4 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1192,9 +1192,9 @@ static void armv7_read_num_pmnc_events(void *info)
static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
{
- return smp_call_function_any(&arm_pmu->supported_cpus,
- armv7_read_num_pmnc_events,
- &arm_pmu->num_events, 1);
+ return smp_call_any(&arm_pmu->supported_cpus,
+ armv7_read_num_pmnc_events,
+ &arm_pmu->num_events, SMP_CALL_TYPE_SYNC);
}
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cb69ff1e6138..7326c7cc67de 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1186,9 +1186,9 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
};
int ret;
- ret = smp_call_function_any(&cpu_pmu->supported_cpus,
- __armv8pmu_probe_pmu,
- &probe, 1);
+ ret = smp_call_any(&cpu_pmu->supported_cpus,
+ __armv8pmu_probe_pmu,
+ &probe, SMP_CALL_TYPE_SYNC);
if (ret)
return ret;
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 87666275eed9..418c83c3c4b5 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -512,7 +512,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
rr->val = 0;
rr->first = first;
- smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
+ (void) smp_call_any(&d->cpu_mask, mon_event_count, rr, SMP_CALL_TYPE_SYNC);
}
int rdtgroup_mondata_show(struct seq_file *m, void *arg)
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 3d514b82d055..e0c4b3c6575d 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -312,8 +312,8 @@ static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
};
int err;
- err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
- WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */
+ err = smp_call_any(mask, do_drv_read, &cmd, SMP_CALL_TYPE_SYNC);
+ WARN_ON_ONCE(err); /* smp_call_any() was buggy? */
return cmd.val;
}
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index fddbd1ea1635..d4f45bb9c419 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -507,8 +507,8 @@ static unsigned int powernv_cpufreq_get(unsigned int cpu)
{
struct powernv_smp_call_data freq_data;
- smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
- &freq_data, 1);
+ (void) smp_call_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
+ &freq_data, SMP_CALL_TYPE_SYNC);
return freq_data.freq;
}
@@ -820,8 +820,10 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
* Use smp_call_function to send IPI and execute the
* mtspr on target CPU. We could do that without IPI
* if current CPU is within policy->cpus (core)
+ *
+	 * Shouldn't this return the value of smp_call_any()?
*/
- smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
+ (void) smp_call_any(policy->cpus, set_pstate, &freq_data, SMP_CALL_TYPE_SYNC);
return 0;
}
@@ -921,8 +923,8 @@ static void powernv_cpufreq_work_fn(struct work_struct *work)
cpus_read_lock();
cpumask_and(&mask, &chip->mask, cpu_online_mask);
- smp_call_function_any(&mask,
- powernv_cpufreq_throttle_check, NULL, 0);
+ (void) smp_call_any(&mask, powernv_cpufreq_throttle_check,
+ NULL, SMP_CALL_TYPE_ASYNC);
if (!chip->restore)
goto out;
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index d44bcc29d99c..e1059c60feab 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1108,7 +1108,7 @@ static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
cpumask_t *mask = &spe_pmu->supported_cpus;
/* Make sure we probe the hardware on a relevant CPU */
- ret = smp_call_function_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, 1);
+ ret = smp_call_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, SMP_CALL_TYPE_SYNC);
if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED))
return -ENXIO;
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 0301faf270bf..3a6663bce18f 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -161,6 +161,8 @@ do { \
*(_csd) = CSD_INIT((_func), (_info)); \
} while (0)
+extern int smp_call_any(const struct cpumask *mask, smp_call_func_t func,
+ void *info, unsigned int flags);
/*
* smp_call Interface.
@@ -304,9 +306,6 @@ void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
smp_call_func_t func, void *info, bool wait);
-int smp_call_function_any(const struct cpumask *mask,
- smp_call_func_t func, void *info, int wait);
-
void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);
@@ -355,13 +354,6 @@ static inline void smp_send_reschedule(int cpu) { }
(up_smp_call_function(func, info))
static inline void call_function_init(void) { }
-static inline int
-smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
- void *info, int wait)
-{
- return smp_call_function_single(0, func, info, wait);
-}
-
static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }
diff --git a/kernel/smp.c b/kernel/smp.c
index 51715633b4f7..6a43ab165aee 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -626,49 +626,6 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
}
EXPORT_SYMBOL(smp_call_function_single);
-/*
- * smp_call_function_any - Run a function on any of the given cpus
- * @mask: The mask of cpus it can run on.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait until function has completed.
- *
- * Returns 0 on success, else a negative status code (if no cpus were online).
- *
- * Selection preference:
- * 1) current cpu if in @mask
- * 2) any cpu of current node if in @mask
- * 3) any other online cpu in @mask
- */
-int smp_call_function_any(const struct cpumask *mask,
- smp_call_func_t func, void *info, int wait)
-{
- unsigned int cpu;
- const struct cpumask *nodemask;
- int ret;
-
- /* Try for same CPU (cheapest) */
- cpu = get_cpu();
- if (cpumask_test_cpu(cpu, mask))
- goto call;
-
- /* Try for same node. */
- nodemask = cpumask_of_node(cpu_to_node(cpu));
- for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
- cpu = cpumask_next_and(cpu, nodemask, mask)) {
- if (cpu_online(cpu))
- goto call;
- }
-
- /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
- cpu = cpumask_any_and(mask, cpu_online_mask);
-call:
- ret = smp_call_function_single(cpu, func, info, wait);
- put_cpu();
- return ret;
-}
-EXPORT_SYMBOL_GPL(smp_call_function_any);
-
static void smp_call_function_many_cond(const struct cpumask *mask,
smp_call_func_t func, void *info,
bool wait,
@@ -1276,6 +1233,39 @@ EXPORT_SYMBOL(smp_call_private);
int smp_call_any(const struct cpumask *mask, smp_call_func_t func,
		 void *info, unsigned int flags)
{
-	return 0;
+	int cpu, ret;
+	const struct cpumask *nodemask;
+
+	if (mask == NULL || func == NULL ||
+	    (flags != SMP_CALL_TYPE_SYNC && flags != SMP_CALL_TYPE_ASYNC))
+		return -EINVAL;
+
+	/* Try for same CPU (cheapest) */
+	preempt_disable();
+	cpu = smp_processor_id();
+
+	if (cpumask_test_cpu(cpu, mask))
+		goto call;
+
+	/* Try for same node. */
+	nodemask = cpumask_of_node(cpu_to_node(cpu));
+	for (cpu = cpumask_first_and(nodemask, mask); (unsigned int)cpu < nr_cpu_ids;
+	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
+		if (cpu_online(cpu))
+			goto call;
+	}
+
+	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
+	cpu = cpumask_any_and(mask, cpu_online_mask);
+	if ((unsigned int)cpu >= nr_cpu_ids) {
+		preempt_enable();
+		return -ENXIO;
+	}
+
+call:
+	/* Propagate the cross-call status, as smp_call_function_any() did. */
+	ret = smp_call(cpu, func, info, flags);
+	preempt_enable();
+	return ret;
}
EXPORT_SYMBOL(smp_call_any);
--
2.27.0
next prev parent reply other threads:[~2022-04-22 21:57 UTC|newest]
Thread overview: 26+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-04-22 20:00 [PATCH v2 00/11] smp: cross CPU call interface Donghai Qiao
2022-04-22 20:00 ` [PATCH v2 01/11] smp: consolidate the structure definitions to smp.h Donghai Qiao
2022-04-23 4:57 ` kernel test robot
2022-04-25 8:39 ` Peter Zijlstra
2022-04-25 9:52 ` Thomas Gleixner
2022-04-22 20:00 ` [PATCH v2 02/11] smp: define the cross call interface Donghai Qiao
2022-04-25 9:05 ` Peter Zijlstra
2022-04-22 20:00 ` [PATCH v2 03/11] smp: eliminate SCF_WAIT and SCF_RUN_LOCAL Donghai Qiao
2022-04-25 9:10 ` Peter Zijlstra
2022-04-22 20:00 ` [PATCH v2 04/11] smp: replace smp_call_function_single() with smp_call() Donghai Qiao
2022-04-25 9:33 ` Peter Zijlstra
2022-04-22 20:00 ` [PATCH v2 05/11] smp: replace smp_call_function_single_async() with smp_call_private() Donghai Qiao
2022-04-23 7:30 ` kernel test robot
2022-04-24 22:06 ` Nathan Chancellor
2022-04-24 22:06 ` Nathan Chancellor
2022-04-25 9:35 ` Peter Zijlstra
2022-04-22 20:00 ` [PATCH v2 06/11] smp: use smp_call_private() fron irq_work.c and core.c Donghai Qiao
2022-04-25 9:37 ` Peter Zijlstra
2022-04-22 20:00 ` Donghai Qiao [this message]
2022-04-22 20:00 ` [PATCH v2 08/11] smp: replace smp_call_function_many_cond() with __smp_call_mask_cond() Donghai Qiao
2022-04-22 20:00 ` [PATCH v2 09/11] smp: replace smp_call_function_single_async with smp_call_private Donghai Qiao
2022-04-22 20:00 ` [PATCH v2 10/11] smp: replace smp_call_function_single() with smp_call() Donghai Qiao
2022-04-22 20:00 ` [PATCH v2 11/11] smp: modify up.c to adopt the same format of cross CPU call Donghai Qiao
2022-04-23 5:17 ` kernel test robot
2022-04-23 5:58 ` kernel test robot
2022-04-26 14:00 ` [PATCH v2 00/11] smp: cross CPU call interface Christoph Hellwig
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220422200040.93813-8-dqiao@redhat.com \
--to=dqiao@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=andriy.shevchenko@linux.intel.com \
--cc=arnd@arndb.de \
--cc=axboe@kernel.dk \
--cc=donghai.w.qiao@gmail.com \
--cc=gor@linux.ibm.com \
--cc=heying24@huawei.com \
--cc=linux-kernel@vger.kernel.org \
--cc=peterz@infradead.org \
--cc=rdunlap@infradead.org \
--cc=sfr@canb.auug.org.au \
--cc=tglx@linutronix.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.