From: Hector Martin <marcan@marcan.st>
To: "Rafael J. Wysocki" <rafael@kernel.org>,
	Viresh Kumar <viresh.kumar@linaro.org>,
	Matthias Brugger <matthias.bgg@gmail.com>
Cc: Hector Martin <marcan@marcan.st>, Sven Peter <sven@svenpeter.dev>,
	Alyssa Rosenzweig <alyssa@rosenzweig.io>,
	Rob Herring <robh+dt@kernel.org>,
	Krzysztof Kozlowski <krzysztof.kozlowski+dt@linaro.org>,
	Stephen Boyd <sboyd@kernel.org>,
	Ulf Hansson <ulf.hansson@linaro.org>,
	Marc Zyngier <maz@kernel.org>,
	Mark Kettenis <mark.kettenis@xs4all.nl>,
	asahi@lists.linux.dev, linux-arm-kernel@lists.infradead.org,
	linux-pm@vger.kernel.org, devicetree@vger.kernel.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH v3 3/5] cpufreq: Generalize of_perf_domain_get_sharing_cpumask phandle format
Date: Mon, 24 Oct 2022 13:39:23 +0900	[thread overview]
Message-ID: <20221024043925.25379-4-marcan@marcan.st> (raw)
In-Reply-To: <20221024043925.25379-1-marcan@marcan.st>

of_perf_domain_get_sharing_cpumask() currently assumes a single-cell
phandle format and directly returns that one cell as an index.
Generalize it to return the full of_phandle_args instead, so it can be
used by drivers whose bindings use other phandle styles (e.g. a
separate node per performance domain). This also requires changing the
CPU sharing match to compare the full args structure rather than a
single index.

Also, make sure to of_node_put(args.np) the references acquired by
of_parse_phandle_with_args() (the original code was leaking one per
call); the reference for the returned args is now handed to the caller,
which is responsible for dropping it.
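
For illustration, a consumer of the updated helper then looks roughly
like this. This is a minimal, hypothetical sketch (the function name is
made up); only the call pattern mirrors the mediatek conversion below:

#include <linux/cpufreq.h>
#include <linux/of.h>

/* Hypothetical helper: resolve the perf domain backing a policy. */
static int example_init_perf_domain(struct cpufreq_policy *policy)
{
	struct of_phandle_args args;
	int index, ret;

	/* Fills policy->cpus with the CPUs sharing the domain and
	 * returns the full phandle args instead of a bare index.
	 */
	ret = of_perf_domain_get_sharing_cpumask(policy->cpu,
						 "performance-domains",
						 "#performance-domain-cells",
						 policy->cpus, &args);
	if (ret < 0)
		return ret;

	/* Single-cell bindings still read the index from args.args[0];
	 * node-per-domain bindings can use args.np directly. Either
	 * way, the caller now owns the args.np reference.
	 */
	index = args.args[0];
	of_node_put(args.np);

	return index;
}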

Signed-off-by: Hector Martin <marcan@marcan.st>
---
 drivers/cpufreq/mediatek-cpufreq-hw.c | 14 +++++++++-----
 include/linux/cpufreq.h               | 28 +++++++++++++++------------
 2 files changed, 25 insertions(+), 17 deletions(-)

diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index f0e0a35c7f21..f80339779084 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -160,6 +160,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
 	struct mtk_cpufreq_data *data;
 	struct device *dev = &pdev->dev;
 	struct resource *res;
+	struct of_phandle_args args;
 	void __iomem *base;
 	int ret, i;
 	int index;
@@ -168,11 +169,14 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
 	if (!data)
 		return -ENOMEM;
 
-	index = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
-						   "#performance-domain-cells",
-						   policy->cpus);
-	if (index < 0)
-		return index;
+	ret = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
+						 "#performance-domain-cells",
+						 policy->cpus, &args);
+	if (ret < 0)
+		return ret;
+
+	index = args.args[0];
+	of_node_put(args.np);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
 	if (!res) {
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index d5595d57f4e5..6a94a6eaad27 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1110,10 +1110,10 @@ cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
 }
 
 static inline int parse_perf_domain(int cpu, const char *list_name,
-				    const char *cell_name)
+				    const char *cell_name,
+				    struct of_phandle_args *args)
 {
 	struct device_node *cpu_np;
-	struct of_phandle_args args;
 	int ret;
 
 	cpu_np = of_cpu_device_node_get(cpu);
@@ -1121,41 +1121,44 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
 		return -ENODEV;
 
 	ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
-					 &args);
+					 args);
 	if (ret < 0)
 		return ret;
 
 	of_node_put(cpu_np);
 
-	return args.args[0];
+	return 0;
 }
 
 static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
-						     const char *cell_name, struct cpumask *cpumask)
+						     const char *cell_name, struct cpumask *cpumask,
+						     struct of_phandle_args *pargs)
 {
-	int target_idx;
 	int cpu, ret;
+	struct of_phandle_args args;
 
-	ret = parse_perf_domain(pcpu, list_name, cell_name);
+	ret = parse_perf_domain(pcpu, list_name, cell_name, pargs);
 	if (ret < 0)
 		return ret;
 
-	target_idx = ret;
 	cpumask_set_cpu(pcpu, cpumask);
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == pcpu)
 			continue;
 
-		ret = parse_perf_domain(cpu, list_name, cell_name);
+		ret = parse_perf_domain(cpu, list_name, cell_name, &args);
 		if (ret < 0)
 			continue;
 
-		if (target_idx == ret)
+		if (pargs->np == args.np && pargs->args_count == args.args_count &&
+		    !memcmp(pargs->args, args.args, sizeof(args.args[0]) * args.args_count))
 			cpumask_set_cpu(cpu, cpumask);
+
+		of_node_put(args.np);
 	}
 
-	return target_idx;
+	return 0;
 }
 #else
 static inline int cpufreq_boost_trigger_state(int state)
@@ -1185,7 +1188,8 @@ cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
 }
 
 static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
-						     const char *cell_name, struct cpumask *cpumask)
+						     const char *cell_name, struct cpumask *cpumask,
+						     struct of_phandle_args *pargs)
 {
 	return -EOPNOTSUPP;
 }
-- 
2.35.1

