From: Rob Herring <robh@kernel.org>
To: Sumit Gupta <sumitg@nvidia.com>
Cc: viresh.kumar@linaro.org, rafael@kernel.org, treding@nvidia.com,
	jonathanh@nvidia.com, linux-pm@vger.kernel.org,
	linux-tegra@vger.kernel.org, linux-kernel@vger.kernel.org,
	bbasu@nvidia.com, sanjayc@nvidia.com, ksitaraman@nvidia.com
Subject: Re: [Patch v2] cpufreq: tegra194: Add support for Tegra239
Date: Tue, 4 Oct 2022 10:27:14 -0500
Message-ID: <20221004152714.GA1524940-robh@kernel.org>
In-Reply-To: <20220920110646.27837-1-sumitg@nvidia.com>

On Tue, Sep 20, 2022 at 04:36:46PM +0530, Sumit Gupta wrote:
> Add support for the Tegra239 SoC, which has eight cores in
> a single cluster. Also, move num_clusters into the SoC data
> to avoid always over-allocating memory for four clusters.
> 
> Signed-off-by: Sumit Gupta <sumitg@nvidia.com>
> Acked-by: Thierry Reding <treding@nvidia.com>
> ---
> v1 -> v2:
> - updated subject line and commit message.
> - changed type for 'num_clusters' from 'size_t' to 'unsigned int'.
> 
>  drivers/cpufreq/tegra194-cpufreq.c | 29 +++++++++++++++--------------
>  1 file changed, 15 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
> index 7e143c06972e..cfc1b225f811 100644
> --- a/drivers/cpufreq/tegra194-cpufreq.c
> +++ b/drivers/cpufreq/tegra194-cpufreq.c
> @@ -38,14 +38,6 @@
>  /* cpufreq transition latency */
>  #define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
>  
> -enum cluster {
> -	CLUSTER0,
> -	CLUSTER1,
> -	CLUSTER2,
> -	CLUSTER3,
> -	MAX_CLUSTERS,
> -};
> -
>  struct tegra_cpu_ctr {
>  	u32 cpu;
>  	u32 coreclk_cnt, last_coreclk_cnt;
> @@ -67,12 +59,12 @@ struct tegra_cpufreq_ops {
>  struct tegra_cpufreq_soc {
>  	struct tegra_cpufreq_ops *ops;
>  	int maxcpus_per_cluster;
> +	unsigned int num_clusters;
>  	phys_addr_t actmon_cntr_base;
>  };
>  
>  struct tegra194_cpufreq_data {
>  	void __iomem *regs;
> -	size_t num_clusters;
>  	struct cpufreq_frequency_table **tables;
>  	const struct tegra_cpufreq_soc *soc;
>  };
> @@ -166,6 +158,14 @@ static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
>  	.ops = &tegra234_cpufreq_ops,
>  	.actmon_cntr_base = 0x9000,
>  	.maxcpus_per_cluster = 4,
> +	.num_clusters = 3,
> +};
> +
> +static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
> +	.ops = &tegra234_cpufreq_ops,
> +	.actmon_cntr_base = 0x4000,
> +	.maxcpus_per_cluster = 8,
> +	.num_clusters = 1,
>  };
>  
>  static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
> @@ -378,7 +378,7 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
>  
>  	data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
>  
> -	if (clusterid >= data->num_clusters || !data->tables[clusterid])
> +	if (clusterid >= data->soc->num_clusters || !data->tables[clusterid])
>  		return -EINVAL;
>  
>  	start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
> @@ -429,6 +429,7 @@ static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
>  static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
>  	.ops = &tegra194_cpufreq_ops,
>  	.maxcpus_per_cluster = 2,
> +	.num_clusters = 4,
>  };
>  
>  static void tegra194_cpufreq_free_resources(void)
> @@ -521,15 +522,14 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
>  
>  	soc = of_device_get_match_data(&pdev->dev);
>  
> -	if (soc->ops && soc->maxcpus_per_cluster) {
> +	if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) {
>  		data->soc = soc;
>  	} else {
>  		dev_err(&pdev->dev, "soc data missing\n");
>  		return -EINVAL;
>  	}
>  
> -	data->num_clusters = MAX_CLUSTERS;
> -	data->tables = devm_kcalloc(&pdev->dev, data->num_clusters,
> +	data->tables = devm_kcalloc(&pdev->dev, data->soc->num_clusters,
>  				    sizeof(*data->tables), GFP_KERNEL);
>  	if (!data->tables)
>  		return -ENOMEM;
> @@ -554,7 +554,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
>  		goto put_bpmp;
>  	}
>  
> -	for (i = 0; i < data->num_clusters; i++) {
> +	for (i = 0; i < data->soc->num_clusters; i++) {
>  		data->tables[i] = init_freq_table(pdev, bpmp, i);
>  		if (IS_ERR(data->tables[i])) {
>  			err = PTR_ERR(data->tables[i]);
> @@ -586,6 +586,7 @@ static int tegra194_cpufreq_remove(struct platform_device *pdev)
>  static const struct of_device_id tegra194_cpufreq_of_match[] = {
>  	{ .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
>  	{ .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc },
> +	{ .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc },

Not documented.

Rob
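
For context on the remark above: a compatible string matched by a driver
needs a corresponding schema entry under Documentation/devicetree/bindings/,
and "nvidia,tegra239-ccplex-cluster" has none at this point in the thread.
A minimal sketch of the missing binding update follows; the file path and
the neighboring enum entries are assumptions based on where the Tegra234
compatible appears to be documented, not verified against the tree:

  --- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra-ccplex-cluster.yaml
  +++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra-ccplex-cluster.yaml
   properties:
     compatible:
       enum:
         - nvidia,tegra186-ccplex-cluster
         - nvidia,tegra234-ccplex-cluster
  +      - nvidia,tegra239-ccplex-cluster

With an entry like this in place, schema validation (make dtbs_check) would
accept a node using the new compatible, which is what the of_device_id entry
at the end of the quoted diff matches against. Such a binding change would
typically be sent as a separate patch to the devicetree list.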


Thread overview: 4+ messages
2022-09-20 11:06 [Patch v2] cpufreq: tegra194: Add support for Tegra239 Sumit Gupta
2022-09-21  7:21 ` Viresh Kumar
2022-10-04 15:27 ` Rob Herring [this message]
2022-10-10  6:04   ` Viresh Kumar
