All of lore.kernel.org
 help / color / mirror / Atom feed
From: Viresh Kumar <viresh.kumar@linaro.org>
To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
Cc: Viresh Kumar <viresh.kumar@linaro.org>,
	Ionela Voinescu <ionela.voinescu@arm.com>,
	Peter Puhov <peter.puhov@linaro.org>,
	Vincent Guittot <vincent.guittot@linaro.org>,
	linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org
Subject: [RFC 1/3] arm64: topology: Add amu_counters_supported() helper
Date: Thu,  9 Jul 2020 15:43:33 +0530	[thread overview]
Message-ID: <f4b69674ee35cbec102857218d8409249c8f26c7.1594289009.git.viresh.kumar@linaro.org> (raw)
In-Reply-To: <cover.1594289009.git.viresh.kumar@linaro.org>

We need to know earlier during the boot cycle whether AMUs are
supported for all the CPUs; export a routine for that and move
code around to make it more readable.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 arch/arm64/kernel/topology.c | 108 ++++++++++++++++++-----------------
 1 file changed, 56 insertions(+), 52 deletions(-)

diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index b7da372819fc..74fde35b56ef 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -130,6 +130,9 @@ static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
 static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
 static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
 static cpumask_var_t amu_fie_cpus;
+static cpumask_var_t valid_cpus;
+static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
+#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)
 
 /* Initialize counter reference per-cpu variables for the current CPU */
 void init_cpu_freq_invariance_counters(void)
@@ -140,26 +143,14 @@ void init_cpu_freq_invariance_counters(void)
 		       read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0));
 }
 
-static int validate_cpu_freq_invariance_counters(int cpu)
+static void setup_freq_invariance(int cpu)
 {
-	u64 max_freq_hz, ratio;
-
-	if (!cpu_has_amu_feat(cpu)) {
-		pr_debug("CPU%d: counters are not supported.\n", cpu);
-		return -EINVAL;
-	}
-
-	if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
-		     !per_cpu(arch_core_cycles_prev, cpu))) {
-		pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
-		return -EINVAL;
-	}
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	u64 ratio;
 
-	/* Convert maximum frequency from KHz to Hz and validate */
-	max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000;
-	if (unlikely(!max_freq_hz)) {
-		pr_debug("CPU%d: invalid maximum frequency.\n", cpu);
-		return -EINVAL;
+	if (!policy) {
+		pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
+		return;
 	}
 
 	/*
@@ -176,69 +167,75 @@ static int validate_cpu_freq_invariance_counters(int cpu)
 	 * be unlikely).
 	 */
 	ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT);
-	ratio = div64_u64(ratio, max_freq_hz);
+	ratio = div64_u64(ratio, policy->cpuinfo.max_freq * 1000);
 	if (!ratio) {
 		WARN_ONCE(1, "System timer frequency too low.\n");
-		return -EINVAL;
+		goto out;
 	}
 
 	per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;
 
-	return 0;
-}
-
-static inline void update_amu_fie_cpus(int cpu, cpumask_var_t valid_cpus)
-{
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
-	if (!policy) {
-		pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
-		return;
-	}
-
 	if (cpumask_subset(policy->related_cpus, valid_cpus))
-		cpumask_or(amu_fie_cpus, policy->related_cpus,
-			   amu_fie_cpus);
+		cpumask_or(amu_fie_cpus, policy->related_cpus, amu_fie_cpus);
 
+out:
 	cpufreq_cpu_put(policy);
 }
 
-static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
-#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)
+bool amu_counters_supported(void)
+{
+	return likely(cpumask_available(valid_cpus)) &&
+		cpumask_equal(valid_cpus, cpu_present_mask);
+}
 
-static int __init init_amu_fie(void)
+static int __init early_init_amu_fie(void)
 {
-	cpumask_var_t valid_cpus;
-	int ret = 0;
 	int cpu;
 
 	if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL))
 		return -ENOMEM;
 
-	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto free_valid_mask;
-	}
-
 	for_each_present_cpu(cpu) {
-		if (validate_cpu_freq_invariance_counters(cpu))
+		if (!cpu_has_amu_feat(cpu)) {
+			pr_debug("CPU%d: counters are not supported.\n", cpu);
+			continue;
+		}
+
+		if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
+			     !per_cpu(arch_core_cycles_prev, cpu))) {
+			pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
 			continue;
+		}
+
 		cpumask_set_cpu(cpu, valid_cpus);
-		update_amu_fie_cpus(cpu, valid_cpus);
 	}
 
+	return 0;
+}
+core_initcall_sync(early_init_amu_fie);
+
+static int __init late_init_amu_fie(void)
+{
+	int cpu;
+
+	if (!cpumask_available(valid_cpus))
+		return -ENOMEM;
+
+	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL))
+		return -ENOMEM;
+
+	for_each_present_cpu(cpu)
+		setup_freq_invariance(cpu);
+
 	if (!cpumask_empty(amu_fie_cpus)) {
 		pr_info("CPUs[%*pbl]: counters will be used for FIE.",
 			cpumask_pr_args(amu_fie_cpus));
 		static_branch_enable(&amu_fie_key);
 	}
 
-free_valid_mask:
-	free_cpumask_var(valid_cpus);
-
-	return ret;
+	return 0;
 }
-late_initcall_sync(init_amu_fie);
+late_initcall_sync(late_init_amu_fie);
 
 bool arch_freq_counters_available(struct cpumask *cpus)
 {
@@ -272,7 +269,7 @@ void topology_scale_freq_tick(void)
 	 * scale =  ------- * --------------------
 	 *	    /\const   SCHED_CAPACITY_SCALE
 	 *
-	 * See validate_cpu_freq_invariance_counters() for details on
+	 * See setup_freq_invariance() for details on
 	 * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT.
 	 */
 	scale = core_cnt - prev_core_cnt;
@@ -287,4 +284,11 @@ void topology_scale_freq_tick(void)
 	this_cpu_write(arch_core_cycles_prev, core_cnt);
 	this_cpu_write(arch_const_cycles_prev, const_cnt);
 }
+#else
+bool amu_counters_supported(void)
+{
+	return false;
+}
 #endif /* CONFIG_ARM64_AMU_EXTN */
+
+EXPORT_SYMBOL_GPL(amu_counters_supported);
-- 
2.25.0.rc1.19.g042ed3e048af


WARNING: multiple messages have this Message-ID (diff)
From: Viresh Kumar <viresh.kumar@linaro.org>
To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>,
	Viresh Kumar <viresh.kumar@linaro.org>,
	linux-kernel@vger.kernel.org,
	Peter Puhov <peter.puhov@linaro.org>,
	Ionela Voinescu <ionela.voinescu@arm.com>,
	linux-arm-kernel@lists.infradead.org
Subject: [RFC 1/3] arm64: topology: Add amu_counters_supported() helper
Date: Thu,  9 Jul 2020 15:43:33 +0530	[thread overview]
Message-ID: <f4b69674ee35cbec102857218d8409249c8f26c7.1594289009.git.viresh.kumar@linaro.org> (raw)
In-Reply-To: <cover.1594289009.git.viresh.kumar@linaro.org>

We need to know earlier during the boot cycle whether AMUs are
supported for all the CPUs; export a routine for that and move
code around to make it more readable.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 arch/arm64/kernel/topology.c | 108 ++++++++++++++++++-----------------
 1 file changed, 56 insertions(+), 52 deletions(-)

diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index b7da372819fc..74fde35b56ef 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -130,6 +130,9 @@ static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
 static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
 static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
 static cpumask_var_t amu_fie_cpus;
+static cpumask_var_t valid_cpus;
+static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
+#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)
 
 /* Initialize counter reference per-cpu variables for the current CPU */
 void init_cpu_freq_invariance_counters(void)
@@ -140,26 +143,14 @@ void init_cpu_freq_invariance_counters(void)
 		       read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0));
 }
 
-static int validate_cpu_freq_invariance_counters(int cpu)
+static void setup_freq_invariance(int cpu)
 {
-	u64 max_freq_hz, ratio;
-
-	if (!cpu_has_amu_feat(cpu)) {
-		pr_debug("CPU%d: counters are not supported.\n", cpu);
-		return -EINVAL;
-	}
-
-	if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
-		     !per_cpu(arch_core_cycles_prev, cpu))) {
-		pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
-		return -EINVAL;
-	}
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	u64 ratio;
 
-	/* Convert maximum frequency from KHz to Hz and validate */
-	max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000;
-	if (unlikely(!max_freq_hz)) {
-		pr_debug("CPU%d: invalid maximum frequency.\n", cpu);
-		return -EINVAL;
+	if (!policy) {
+		pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
+		return;
 	}
 
 	/*
@@ -176,69 +167,75 @@ static int validate_cpu_freq_invariance_counters(int cpu)
 	 * be unlikely).
 	 */
 	ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT);
-	ratio = div64_u64(ratio, max_freq_hz);
+	ratio = div64_u64(ratio, policy->cpuinfo.max_freq * 1000);
 	if (!ratio) {
 		WARN_ONCE(1, "System timer frequency too low.\n");
-		return -EINVAL;
+		goto out;
 	}
 
 	per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;
 
-	return 0;
-}
-
-static inline void update_amu_fie_cpus(int cpu, cpumask_var_t valid_cpus)
-{
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
-	if (!policy) {
-		pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
-		return;
-	}
-
 	if (cpumask_subset(policy->related_cpus, valid_cpus))
-		cpumask_or(amu_fie_cpus, policy->related_cpus,
-			   amu_fie_cpus);
+		cpumask_or(amu_fie_cpus, policy->related_cpus, amu_fie_cpus);
 
+out:
 	cpufreq_cpu_put(policy);
 }
 
-static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
-#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)
+bool amu_counters_supported(void)
+{
+	return likely(cpumask_available(valid_cpus)) &&
+		cpumask_equal(valid_cpus, cpu_present_mask);
+}
 
-static int __init init_amu_fie(void)
+static int __init early_init_amu_fie(void)
 {
-	cpumask_var_t valid_cpus;
-	int ret = 0;
 	int cpu;
 
 	if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL))
 		return -ENOMEM;
 
-	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto free_valid_mask;
-	}
-
 	for_each_present_cpu(cpu) {
-		if (validate_cpu_freq_invariance_counters(cpu))
+		if (!cpu_has_amu_feat(cpu)) {
+			pr_debug("CPU%d: counters are not supported.\n", cpu);
+			continue;
+		}
+
+		if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
+			     !per_cpu(arch_core_cycles_prev, cpu))) {
+			pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
 			continue;
+		}
+
 		cpumask_set_cpu(cpu, valid_cpus);
-		update_amu_fie_cpus(cpu, valid_cpus);
 	}
 
+	return 0;
+}
+core_initcall_sync(early_init_amu_fie);
+
+static int __init late_init_amu_fie(void)
+{
+	int cpu;
+
+	if (!cpumask_available(valid_cpus))
+		return -ENOMEM;
+
+	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL))
+		return -ENOMEM;
+
+	for_each_present_cpu(cpu)
+		setup_freq_invariance(cpu);
+
 	if (!cpumask_empty(amu_fie_cpus)) {
 		pr_info("CPUs[%*pbl]: counters will be used for FIE.",
 			cpumask_pr_args(amu_fie_cpus));
 		static_branch_enable(&amu_fie_key);
 	}
 
-free_valid_mask:
-	free_cpumask_var(valid_cpus);
-
-	return ret;
+	return 0;
 }
-late_initcall_sync(init_amu_fie);
+late_initcall_sync(late_init_amu_fie);
 
 bool arch_freq_counters_available(struct cpumask *cpus)
 {
@@ -272,7 +269,7 @@ void topology_scale_freq_tick(void)
 	 * scale =  ------- * --------------------
 	 *	    /\const   SCHED_CAPACITY_SCALE
 	 *
-	 * See validate_cpu_freq_invariance_counters() for details on
+	 * See setup_freq_invariance() for details on
 	 * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT.
 	 */
 	scale = core_cnt - prev_core_cnt;
@@ -287,4 +284,11 @@ void topology_scale_freq_tick(void)
 	this_cpu_write(arch_core_cycles_prev, core_cnt);
 	this_cpu_write(arch_const_cycles_prev, const_cnt);
 }
+#else
+bool amu_counters_supported(void)
+{
+	return false;
+}
 #endif /* CONFIG_ARM64_AMU_EXTN */
+
+EXPORT_SYMBOL_GPL(amu_counters_supported);
-- 
2.25.0.rc1.19.g042ed3e048af


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

  reply	other threads:[~2020-07-09 10:14 UTC|newest]

Thread overview: 27+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-07-09 10:13 [RFC 0/3] cpufreq: cppc: Add support for frequency invariance Viresh Kumar
2020-07-09 10:13 ` Viresh Kumar
2020-07-09 10:13 ` Viresh Kumar [this message]
2020-07-09 10:13   ` [RFC 1/3] arm64: topology: Add amu_counters_supported() helper Viresh Kumar
2020-07-09 10:13 ` [RFC 2/3] topology: Provide generic implementation of arch_freq_counters_available() Viresh Kumar
2020-07-09 10:13   ` Viresh Kumar
2020-07-09 10:13 ` [RFC 3/3] cpufreq: cppc: Add support for frequency invariance Viresh Kumar
2020-07-09 12:43 ` [RFC 0/3] " Ionela Voinescu
2020-07-09 12:43   ` Ionela Voinescu
2020-07-10  3:00   ` Viresh Kumar
2020-07-10  3:00     ` Viresh Kumar
2020-07-24  9:38     ` Vincent Guittot
2020-07-24  9:38       ` Vincent Guittot
2020-08-24 10:49       ` Viresh Kumar
2020-08-24 10:49         ` Viresh Kumar
2020-08-25  9:56       ` Ionela Voinescu
2020-08-25  9:56         ` Ionela Voinescu
2020-08-27  7:51         ` Viresh Kumar
2020-08-27  7:51           ` Viresh Kumar
2020-08-27 11:27           ` Ionela Voinescu
2020-08-27 11:27             ` Ionela Voinescu
2020-08-31 11:26             ` Viresh Kumar
2020-08-31 11:26               ` Viresh Kumar
2020-10-05  7:58             ` Viresh Kumar
2020-10-05  7:58               ` Viresh Kumar
2020-10-05 23:16               ` Ionela Voinescu
2020-10-05 23:16                 ` Ionela Voinescu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=f4b69674ee35cbec102857218d8409249c8f26c7.1594289009.git.viresh.kumar@linaro.org \
    --to=viresh.kumar@linaro.org \
    --cc=catalin.marinas@arm.com \
    --cc=ionela.voinescu@arm.com \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=peter.puhov@linaro.org \
    --cc=vincent.guittot@linaro.org \
    --cc=will@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.