From: Sibi Sankar <quic_sibis@quicinc.com>
To: <bjorn.andersson@linaro.org>, <robh+dt@kernel.org>
Cc: <ohad@wizery.com>, <agross@kernel.org>,
<mathieu.poirier@linaro.org>, <linux-arm-msm@vger.kernel.org>,
<linux-remoteproc@vger.kernel.org>, <devicetree@vger.kernel.org>,
<linux-kernel@vger.kernel.org>, <evgreen@chromium.org>,
<dianders@chromium.org>, <swboyd@chromium.org>,
<mka@chromium.org>, <krzysztof.kozlowski@canonical.com>,
Sibi Sankar <quic_sibis@quicinc.com>
Subject: [PATCH 2/3] remoteproc: qcom_q6v5_mss: Add support for interconnect bandwidth voting
Date: Mon, 14 Feb 2022 10:04:11 +0530 [thread overview]
Message-ID: <1644813252-12897-3-git-send-email-quic_sibis@quicinc.com> (raw)
In-Reply-To: <1644813252-12897-1-git-send-email-quic_sibis@quicinc.com>
Add support for proxy interconnect bandwidth votes during modem bootup on
SC7280 SoCs.
Signed-off-by: Sibi Sankar <quic_sibis@quicinc.com>
---
drivers/remoteproc/qcom_q6v5_mss.c | 95 +++++++++++++++++++++++++++++++++++++-
1 file changed, 94 insertions(+), 1 deletion(-)
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index a2c231a17b2b..5a37628311c6 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -12,6 +12,7 @@
#include <linux/devcoredump.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/interconnect.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -125,6 +126,18 @@
#define QDSP6SS_BOOT_CMD 0x404
#define BOOT_FSM_TIMEOUT 10000
+/*
+ * struct interconnect_info - runtime state of one proxy interconnect vote
+ * @path: interconnect path handle obtained via devm_of_icc_get()
+ * @average_bandwidth: average bandwidth vote, in icc_set_bw() units (kBps)
+ * @peak_bandwidth: peak bandwidth vote, in icc_set_bw() units (kBps)
+ */
+struct interconnect_info {
+	struct icc_path *path;
+	u32 average_bandwidth;
+	u32 peak_bandwidth;
+};
+
+/*
+ * struct qcom_mss_icc_res - static per-SoC description of a proxy path
+ * @name: interconnect path name as listed in interconnect-names in DT
+ * @average_bandwidth: average bandwidth to vote while the path is active
+ * @peak_bandwidth: peak bandwidth to vote while the path is active
+ *
+ * Tables of these are NULL-name terminated (see q6v5_interconnect_init()).
+ */
+struct qcom_mss_icc_res {
+	const char *name;
+	u32 average_bandwidth;
+	u32 peak_bandwidth;
+};
+
+
struct reg_info {
struct regulator *reg;
int uV;
@@ -142,6 +155,7 @@ struct rproc_hexagon_res {
struct qcom_mss_reg_res *proxy_supply;
struct qcom_mss_reg_res *fallback_proxy_supply;
struct qcom_mss_reg_res *active_supply;
+ struct qcom_mss_icc_res *proxy_path;
char **proxy_clk_names;
char **reset_clk_names;
char **active_clk_names;
@@ -202,6 +216,9 @@ struct q6v5 {
int proxy_reg_count;
int fallback_proxy_reg_count;
+ struct interconnect_info interconnect[1];
+ int proxy_path_count;
+
bool dump_mba_loaded;
size_t current_dump_size;
size_t total_dump_size;
@@ -267,6 +284,29 @@ static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
return i;
}
+/*
+ * q6v5_interconnect_init() - look up the proxy interconnect paths
+ * @dev: remoteproc platform device
+ * @interconnect: destination array of runtime path state
+ * @icc_res: NULL-name-terminated table of paths to acquire, or NULL when
+ *           the platform describes no proxy interconnect paths
+ *
+ * Return: number of paths initialized (0 when @icc_res is NULL), or a
+ * negative errno on failure to acquire a path.
+ */
+static int q6v5_interconnect_init(struct device *dev, struct interconnect_info *interconnect,
+				  const struct qcom_mss_icc_res *icc_res)
+{
+	struct icc_path *path;
+	int i;
+
+	/*
+	 * Most existing rproc_hexagon_res descriptors leave proxy_path
+	 * unset; bail out early instead of dereferencing NULL in probe.
+	 */
+	if (!icc_res)
+		return 0;
+
+	/*
+	 * NOTE(review): the caller's interconnect[] array currently has a
+	 * single slot — confirm the table length never exceeds it.
+	 */
+	for (i = 0; icc_res[i].name; i++) {
+		path = devm_of_icc_get(dev, icc_res[i].name);
+		if (IS_ERR(path))
+			return dev_err_probe(dev, PTR_ERR(path),
+					     "Failed to get %s interconnect\n",
+					     icc_res[i].name);
+
+		interconnect[i].path = path;
+		interconnect[i].average_bandwidth = icc_res[i].average_bandwidth;
+		interconnect[i].peak_bandwidth = icc_res[i].peak_bandwidth;
+	}
+
+	return i;
+}
+
+
static int q6v5_regulator_enable(struct q6v5 *qproc,
struct reg_info *regs, int count)
{
@@ -364,6 +404,36 @@ static void q6v5_clk_disable(struct device *dev,
clk_disable_unprepare(clks[i]);
}
+/*
+ * q6v5_icc_enable() - apply the proxy bandwidth votes
+ * @dev: remoteproc platform device (for error reporting)
+ * @interconnect: array of paths with their bandwidth values
+ * @count: number of valid entries in @interconnect
+ *
+ * On failure every vote already applied is rolled back to zero.
+ *
+ * Return: 0 on success, negative errno from icc_set_bw() on failure.
+ */
+static int q6v5_icc_enable(struct device *dev, struct interconnect_info *interconnect, int count)
+{
+	int ret;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		ret = icc_set_bw(interconnect[i].path, interconnect[i].average_bandwidth,
+				 interconnect[i].peak_bandwidth);
+		/*
+		 * Braces are required here: without them the goto below ran
+		 * unconditionally, so the loop always aborted (and rolled
+		 * back) after the first iteration and "return 0" was
+		 * unreachable for count > 0.
+		 */
+		if (ret) {
+			dev_err(dev, "Failed enabling %s interconnect\n",
+				icc_get_name(interconnect[i].path));
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	/* Undo the votes applied before the failing entry */
+	for (i--; i >= 0; i--)
+		icc_set_bw(interconnect[i].path, 0, 0);
+
+	return ret;
+}
+
+
+/*
+ * q6v5_icc_disable() - drop all proxy bandwidth votes
+ * @dev: remoteproc platform device (unused, kept for symmetry with enable)
+ * @interconnect: array of paths to release
+ * @count: number of valid entries in @interconnect
+ */
+static void q6v5_icc_disable(struct device *dev, struct interconnect_info *interconnect, int count)
+{
+	int idx;
+
+	for (idx = 0; idx < count; idx++)
+		icc_set_bw(interconnect[idx].path, 0, 0);
+}
+
+
static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
size_t pd_count)
{
@@ -1011,10 +1081,14 @@ static int q6v5_mba_load(struct q6v5 *qproc)
if (ret)
return ret;
+ ret = q6v5_icc_enable(qproc->dev, qproc->interconnect, qproc->proxy_path_count);
+ if (ret)
+ goto disable_irqs;
+
ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
if (ret < 0) {
dev_err(qproc->dev, "failed to enable proxy power domains\n");
- goto disable_irqs;
+ goto disable_path;
}
ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs,
@@ -1158,6 +1232,8 @@ static int q6v5_mba_load(struct q6v5 *qproc)
qproc->fallback_proxy_reg_count);
disable_proxy_pds:
q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+disable_path:
+ q6v5_icc_disable(qproc->dev, qproc->interconnect, qproc->proxy_path_count);
disable_irqs:
qcom_q6v5_unprepare(&qproc->q6v5);
@@ -1232,6 +1308,7 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
qproc->fallback_proxy_reg_count);
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
+ q6v5_icc_disable(qproc->dev, qproc->interconnect, qproc->proxy_path_count);
}
}
@@ -1611,6 +1688,7 @@ static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
qproc->fallback_proxy_reg_count);
q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+ q6v5_icc_disable(qproc->dev, qproc->interconnect, qproc->proxy_path_count);
}
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
@@ -1942,6 +2020,13 @@ static int q6v5_probe(struct platform_device *pdev)
}
qproc->active_reg_count = ret;
+ ret = q6v5_interconnect_init(&pdev->dev, qproc->interconnect, desc->proxy_path);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get proxy interconnects.\n");
+ goto free_rproc;
+ }
+ qproc->proxy_path_count = ret;
+
ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
desc->proxy_pd_names);
/* Fallback to regulators for old device trees */
@@ -2077,6 +2162,14 @@ static const struct rproc_hexagon_res sc7280_mss = {
"mss",
NULL
},
+ .proxy_path = (struct qcom_mss_icc_res[]) {
+ {
+ .name = "imem",
+ .average_bandwidth = 0,
+ .peak_bandwidth = 8532000,
+ },
+ {}
+ },
.need_mem_protection = true,
.has_alt_reset = false,
.has_mba_logs = true,
--
2.7.4
next prev parent reply other threads:[~2022-02-14 4:34 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-02-14 4:34 [PATCH 0/3] Add support for proxy interconnect bandwidth votes Sibi Sankar
2022-02-14 4:34 ` [PATCH 1/3] dt-bindings: remoteproc: qcom: Add interconnects property Sibi Sankar
2022-02-14 7:28 ` Krzysztof Kozlowski
2022-02-14 4:34 ` Sibi Sankar [this message]
2022-02-22 18:33 ` [PATCH 2/3] remoteproc: qcom_q6v5_mss: Add support for interconnect bandwidth voting Bjorn Andersson
2022-02-14 4:34 ` [PATCH 3/3] arm64: dts: qcom: sc7280: Add proxy interconnect requirements for modem Sibi Sankar
2022-02-24 19:59 ` Bjorn Andersson
2022-04-04 23:17 ` [PATCH 0/3] Add support for proxy interconnect bandwidth votes Stephen Boyd
2022-05-10 10:59 ` Sibi Sankar
2022-05-10 11:03 ` Sibi Sankar
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1644813252-12897-3-git-send-email-quic_sibis@quicinc.com \
--to=quic_sibis@quicinc.com \
--cc=agross@kernel.org \
--cc=bjorn.andersson@linaro.org \
--cc=devicetree@vger.kernel.org \
--cc=dianders@chromium.org \
--cc=evgreen@chromium.org \
--cc=krzysztof.kozlowski@canonical.com \
--cc=linux-arm-msm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-remoteproc@vger.kernel.org \
--cc=mathieu.poirier@linaro.org \
--cc=mka@chromium.org \
--cc=ohad@wizery.com \
--cc=robh+dt@kernel.org \
--cc=swboyd@chromium.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).