From: Sumit Gupta <sumitg@nvidia.com>
To: <treding@nvidia.com>, <krzysztof.kozlowski@linaro.org>,
	<dmitry.osipenko@collabora.com>, <viresh.kumar@linaro.org>,
	<rafael@kernel.org>, <jonathanh@nvidia.com>, <robh+dt@kernel.org>,
	<lpieralisi@kernel.org>
Cc: <linux-kernel@vger.kernel.org>, <linux-tegra@vger.kernel.org>,
	<linux-pm@vger.kernel.org>, <devicetree@vger.kernel.org>,
	<linux-pci@vger.kernel.org>, <mmaddireddy@nvidia.com>,
	<kw@linux.com>, <bhelgaas@google.com>, <vidyas@nvidia.com>,
	<sanjayc@nvidia.com>, <ksitaraman@nvidia.com>, <ishah@nvidia.com>,
	<bbasu@nvidia.com>, <sumitg@nvidia.com>
Subject: [Patch v3 09/11] PCI: tegra194: Add interconnect support in Tegra234
Date: Mon, 20 Mar 2023 23:54:39 +0530
Message-ID: <20230320182441.11904-10-sumitg@nvidia.com>
In-Reply-To: <20230320182441.11904-1-sumitg@nvidia.com>

Add support for requesting DRAM bandwidth through the Memory
Interconnect framework on the Tegra234 SoC. The DRAM bandwidth
required depends on the negotiated PCIe link speed (Gen-1/2/3/4)
and link width (x1/x2/x4/x8).
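
For illustration, using the PCIE_SPEED2MBS_ENC() encodings from
drivers/pci/pci.h (Mb/s reduced by line-encoding overhead), a Gen-3
(8 GT/s) x4 link requests 4 * (7877 / 8) = 3936 MB/s of average
bandwidth on the "write" interconnect path.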

Suggested-by: Manikanta Maddireddy <mmaddireddy@nvidia.com>
Signed-off-by: Sumit Gupta <sumitg@nvidia.com>
---
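For reference, a minimal, self-contained sketch of the interconnect
consumer calls this patch relies on; only the "write" path name below
matches the driver, while the surrounding function and its parameters
are illustrative placeholders rather than actual Tegra234 code:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect.h>
#include <linux/types.h>

/* Request 'mbps' MB/s of average DRAM bandwidth on the "write" path. */
static int example_request_bw(struct device *dev, u32 mbps)
{
	struct icc_path *path;
	int ret;

	/* The path is resolved via the node's interconnect-names entry. */
	path = devm_of_icc_get(dev, "write");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* Average bandwidth only, no peak requirement. */
	ret = icc_set_bw(path, MBps_to_icc(mbps), 0);
	if (ret)
		dev_err(dev, "failed to set bandwidth: %d\n", ret);

	return ret;
}
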
 drivers/pci/controller/dwc/pcie-tegra194.c | 40 +++++++++++++++++-----
 1 file changed, 32 insertions(+), 8 deletions(-)

diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 09825b4a075e..d2513c9d3feb 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -15,6 +15,7 @@
 #include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
+#include <linux/interconnect.h>
 #include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -287,6 +288,7 @@ struct tegra_pcie_dw {
 	unsigned int pex_rst_irq;
 	int ep_state;
 	long link_status;
+	struct icc_path *icc_path;
 };
 
 static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
@@ -309,6 +311,24 @@ struct tegra_pcie_soc {
 	enum dw_pcie_device_mode mode;
 };
 
+static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
+{
+	struct dw_pcie *pci = &pcie->pci;
+	u32 val, speed, width;
+
+	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+
+	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, val);
+	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+
+	val = width * (PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]) / BITS_PER_BYTE);
+
+	if (icc_set_bw(pcie->icc_path, MBps_to_icc(val), 0))
+		dev_err(pcie->dev, "can't set bw[%u]\n", val);
+
+	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
+}
+
 static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -452,14 +472,12 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
 	struct tegra_pcie_dw *pcie = arg;
 	struct dw_pcie_ep *ep = &pcie->pci.ep;
 	struct dw_pcie *pci = &pcie->pci;
-	u32 val, speed;
+	u32 val;
 
 	if (test_and_clear_bit(0, &pcie->link_status))
 		dw_pcie_ep_linkup(ep);
 
-	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
-		PCI_EXP_LNKSTA_CLS;
-	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
+	tegra_pcie_icc_set(pcie);
 
 	if (pcie->of_data->has_ltr_req_fix)
 		return IRQ_HANDLED;
@@ -945,9 +963,9 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
 
 static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
 {
-	u32 val, offset, speed, tmp;
 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
 	struct dw_pcie_rp *pp = &pci->pp;
+	u32 val, offset, tmp;
 	bool retry = true;
 
 	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
@@ -1018,9 +1036,7 @@ static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
 		goto retry_link;
 	}
 
-	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
-		PCI_EXP_LNKSTA_CLS;
-	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
+	tegra_pcie_icc_set(pcie);
 
 	tegra_pcie_enable_interrupts(pp);
 
@@ -2224,6 +2240,14 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, pcie);
 
+	pcie->icc_path = devm_of_icc_get(&pdev->dev, "write");
+	ret = PTR_ERR_OR_ZERO(pcie->icc_path);
+	if (ret) {
+		tegra_bpmp_put(pcie->bpmp);
+		dev_err_probe(&pdev->dev, ret, "failed to get write interconnect\n");
+		return ret;
+	}
+
 	switch (pcie->of_data->mode) {
 	case DW_PCIE_RC_TYPE:
 		ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
-- 
2.17.1

