From: Taniya Das <tdas@codeaurora.org>
To: Thara Gopinath <thara.gopinath@linaro.org>,
	agross@kernel.org, bjorn.andersson@linaro.org,
	rui.zhang@intel.com, daniel.lezcano@linaro.org,
	viresh.kumar@linaro.org, rjw@rjwysocki.net, robh+dt@kernel.org
Cc: linux-arm-msm@vger.kernel.org, linux-pm@vger.kernel.org,
	linux-kernel@vger.kernel.org, devicetree@vger.kernel.org
Subject: Re: [Patch v2 3/5] cpufreq: qcom-cpufreq-hw: Add dcvs interrupt support
Date: Tue, 29 Jun 2021 08:20:16 +0530
Message-ID: <34542481-09b0-ae3b-25f1-77565bf47755@codeaurora.org>
In-Reply-To: <20210624115813.3613290-4-thara.gopinath@linaro.org>



On 6/24/2021 5:28 PM, Thara Gopinath wrote:
> Add interrupt support to notify the kernel of h/w initiated frequency
> throttling by LMh. Convey this to the scheduler via the thermal pressure
> interface.
> 
> Signed-off-by: Thara Gopinath <thara.gopinath@linaro.org>
> ---
> 
> v1->v2:
> 	- Introduced qcom_cpufreq_hw_lmh_init to consolidate LMh related initializations
> 	  as per Viresh's review comment.
> 	- Moved the piece of code restarting polling/re-enabling LMh interrupt to
> 	  qcom_lmh_dcvs_notify, thereby simplifying the ISR and timer callback as per Viresh's
> 	  suggestion.
> 	- Dropped cpus from qcom_cpufreq_data and instead used cpus from cpufreq_policy in
> 	  qcom_lmh_dcvs_notify as per Viresh's review comment.
> 	- Dropped dt property qcom,support-lmh as per Bjorn's suggestion.
> 	- Other minor/cosmetic fixes
> 
>   drivers/cpufreq/qcom-cpufreq-hw.c | 103 ++++++++++++++++++++++++++++++
>   1 file changed, 103 insertions(+)
> 
> diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
> index f86859bf76f1..241f6f2b441f 100644
> --- a/drivers/cpufreq/qcom-cpufreq-hw.c
> +++ b/drivers/cpufreq/qcom-cpufreq-hw.c
> @@ -13,6 +13,7 @@
>   #include <linux/of_platform.h>
>   #include <linux/pm_opp.h>
>   #include <linux/slab.h>
> +#include <linux/interrupt.h>
>   
>   #define LUT_MAX_ENTRIES			40U
>   #define LUT_SRC				GENMASK(31, 30)
> @@ -22,10 +23,13 @@
>   #define CLK_HW_DIV			2
>   #define LUT_TURBO_IND			1
>   
> +#define HZ_PER_KHZ			1000
> +
>   struct qcom_cpufreq_soc_data {
>   	u32 reg_enable;
>   	u32 reg_freq_lut;
>   	u32 reg_volt_lut;
> +	u32 reg_current_vote;
>   	u32 reg_perf_state;
>   	u8 lut_row_size;
>   };
> @@ -33,7 +37,10 @@ struct qcom_cpufreq_soc_data {
>   struct qcom_cpufreq_data {
>   	void __iomem *base;
>   	struct resource *res;
> +	struct delayed_work lmh_dcvs_poll_work;
>   	const struct qcom_cpufreq_soc_data *soc_data;
> +	struct cpufreq_policy *policy;
> +	int lmh_dcvs_irq;
>   };
>   
>   static unsigned long cpu_hw_rate, xo_rate;
> @@ -251,10 +258,79 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
>   	}
>   }
>   
> +static inline unsigned long qcom_lmh_vote_to_freq(u32 val)
> +{
> +	return (val & 0x3FF) * 19200;
> +}
> +
> +static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
> +{
> +	struct cpufreq_policy *policy = data->policy;
> +	struct dev_pm_opp *opp;
> +	struct device *dev;
> +	unsigned long max_capacity, capacity, freq_hz, throttled_freq;
> +	unsigned int val, freq;
> +
> +	/*
> +	 * Get the h/w throttled frequency, normalize it using the
> +	 * registered opp table and use it to calculate thermal pressure.
> +	 */
> +	val = readl_relaxed(data->base + data->soc_data->reg_current_vote);
> +	freq = qcom_lmh_vote_to_freq(val);
> +	freq_hz = freq * HZ_PER_KHZ;
> +
> +	dev = get_cpu_device(cpumask_first(policy->cpus));
> +	opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
> +	if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
> +		opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);
> +
> +	throttled_freq = freq_hz / HZ_PER_KHZ;
> +
> +	/* Update thermal pressure */
> +	max_capacity = arch_scale_cpu_capacity(cpumask_first(policy->cpus));
> +	capacity = throttled_freq * max_capacity;
> +	capacity /= policy->cpuinfo.max_freq;
> +	/* Don't pass boost capacity to scheduler */
> +	if (capacity > max_capacity)
> +		capacity = max_capacity;
> +	arch_set_thermal_pressure(policy->cpus, max_capacity - capacity);
> +	/*
> +	 * If h/w throttled frequency is higher than what cpufreq has requested for, stop
> +	 * polling and switch back to interrupt mechanism
> +	 */
> +	if (throttled_freq >= qcom_cpufreq_hw_get(cpumask_first(policy->cpus)))
> +		/* Clear the existing interrupts and enable it back */
> +		enable_irq(data->lmh_dcvs_irq);
> +	else
> +		mod_delayed_work(system_highpri_wq, &data->lmh_dcvs_poll_work,
> +				 msecs_to_jiffies(10));
> +}
> +
> +static void qcom_lmh_dcvs_poll(struct work_struct *work)
> +{
> +	struct qcom_cpufreq_data *data;
> +
> +	data = container_of(work, struct qcom_cpufreq_data, lmh_dcvs_poll_work.work);
> +
> +	qcom_lmh_dcvs_notify(data);
> +}
> +
> +static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
> +{
> +	struct qcom_cpufreq_data *c_data = data;
> +
> +	/* Disable interrupt and enable polling */
> +	disable_irq_nosync(c_data->lmh_dcvs_irq);
> +	qcom_lmh_dcvs_notify(c_data);
> +
> +	return IRQ_HANDLED;
> +}
> +
>   static const struct qcom_cpufreq_soc_data qcom_soc_data = {
>   	.reg_enable = 0x0,
>   	.reg_freq_lut = 0x110,
>   	.reg_volt_lut = 0x114,
> +	.reg_current_vote = 0x704,
>   	.reg_perf_state = 0x920,
>   	.lut_row_size = 32,
>   };
> @@ -274,6 +350,23 @@ static const struct of_device_id qcom_cpufreq_hw_match[] = {
>   };
>   MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
>   
> +static void qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy)
> +{
> +	struct qcom_cpufreq_data *data = policy->driver_data;
> +	struct platform_device *pdev = cpufreq_get_driver_data();
> +	struct device *dev = &pdev->dev;
> +	int ret;
> +
> +	ret = devm_request_irq(dev, data->lmh_dcvs_irq, qcom_lmh_dcvs_handle_irq,
> +			       0, "dcvsh-irq", data);


It would be better to tag the CPU id while registering the IRQ, e.g.
"dcvsh-irq-x" (x = 0/4/7).

> +	if (ret) {
> +		dev_err(dev, "Error %d registering irq %x\n", ret, data->lmh_dcvs_irq);
> +		return;
> +	}
> +	data->policy = policy;
> +	INIT_DEFERRABLE_WORK(&data->lmh_dcvs_poll_work, qcom_lmh_dcvs_poll);
> +}
> +
>   static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
>   {
>   	struct platform_device *pdev = cpufreq_get_driver_data();
> @@ -370,6 +463,16 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
>   			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
>   	}
>   
> +	/* Look for LMh interrupt. If no interrupt line is specified or
> +	 * there is an error, allow cpufreq to be enabled as usual.
> +	 */
> +	data->lmh_dcvs_irq = platform_get_irq(pdev, index);
> +	if (data->lmh_dcvs_irq > 0) {
> +		qcom_cpufreq_hw_lmh_init(policy);
> +	} else if (data->lmh_dcvs_irq != -ENXIO) {
> +		ret = data->lmh_dcvs_irq;
> +		goto error;
> +	}
>   	return 0;
>   error:
>   	kfree(data);
> 

-- 
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a member
of Code Aurora Forum, hosted by The Linux Foundation.
