From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
To: Andy Gross <agross@kernel.org>,
Bjorn Andersson <bjorn.andersson@linaro.org>,
Rob Herring <robh+dt@kernel.org>,
Krzysztof Kozlowski <krzk@kernel.org>,
Jingoo Han <jingoohan1@gmail.com>,
Gustavo Pimentel <gustavo.pimentel@synopsys.com>,
Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>,
Bjorn Helgaas <bhelgaas@google.com>,
Stanimir Varbanov <svarbanov@mm-sol.com>,
Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Cc: Vinod Koul <vkoul@kernel.org>,
linux-arm-msm@vger.kernel.org, linux-pci@vger.kernel.org,
devicetree@vger.kernel.org
Subject: [PATCH v6 5/7] PCI: qcom: Handle MSIs routed to multiple GIC interrupts
Date: Thu, 5 May 2022 12:12:29 +0300
Message-ID: <20220505091231.1308963-6-dmitry.baryshkov@linaro.org>
In-Reply-To: <20220505091231.1308963-1-dmitry.baryshkov@linaro.org>
On some Qualcomm platforms each group of 32 MSI vectors is routed to a
separate GIC interrupt. Thus, to receive the higher MSI vectors properly,
add separate msi_host_init()/msi_host_deinit() callbacks that handle the
additional host IRQs.
Note that if the DT doesn't list the extra MSI interrupts, the driver
limits the number of supported MSI vectors accordingly (to 32).
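For illustration only (not part of this patch), here is a minimal
standalone sketch of how an MSI vector number maps onto a GIC interrupt
group and onto the per-group status register that the chained handler
reads. The constants are assumed to match pcie-designware.h
(PCIE_MSI_INTR0_STATUS == 0x830, MSI_REG_CTRL_BLOCK_SIZE == 12,
MAX_MSI_IRQS_PER_CTRL == 32); verify against the header before relying
on them:

/* standalone sketch, not kernel code */
#include <stdio.h>

#define PCIE_MSI_INTR0_STATUS	0x830	/* status register of group 0 */
#define MAX_MSI_IRQS_PER_CTRL	32	/* MSI vectors per GIC interrupt */
#define MSI_REG_CTRL_BLOCK_SIZE	12	/* register stride between groups */

int main(void)
{
	unsigned int vector = 70;	/* example MSI vector */
	unsigned int group = vector / MAX_MSI_IRQS_PER_CTRL;
	unsigned int bit = vector % MAX_MSI_IRQS_PER_CTRL;
	unsigned int reg = PCIE_MSI_INTR0_STATUS +
			   group * MSI_REG_CTRL_BLOCK_SIZE;

	/* vector 70 -> group 2, bit 6, status register 0x848 */
	printf("vector %u: group %u, bit %u, status reg 0x%x\n",
	       vector, group, bit, reg);
	return 0;
}

So vector 70 arrives on the third GIC interrupt (the one named "msi3"
in the DT) and shows up as bit 6 of the status register at 0x848, which
is exactly the lookup qcom_chained_msi_isr() below performs per group.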
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
---
drivers/pci/controller/dwc/pcie-qcom.c | 137 ++++++++++++++++++++++++-
1 file changed, 136 insertions(+), 1 deletion(-)
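As a side note for reviewers (again not part of the patch), a tiny
sketch of the interrupt-names sequence that the new
qcom_pcie_msi_host_init() loop probes, assuming MAX_MSI_CTRLS == 8 as
in pcie-designware.h; the first group keeps the legacy "msi" name and
the following groups are named "msi2".."msi8":

/* standalone sketch, not kernel code */
#include <stdio.h>

int main(void)
{
	char irq_name[8];
	int ctrl;

	printf("group 0: \"msi\" (vectors 0-31)\n");
	for (ctrl = 1; ctrl < 8; ctrl++) {
		/* same format string as the driver: "msi%d", ctrl + 1 */
		snprintf(irq_name, sizeof(irq_name), "msi%d", ctrl + 1);
		printf("group %d: \"%s\" (vectors %d-%d)\n",
		       ctrl, irq_name, ctrl * 32, ctrl * 32 + 31);
	}
	return 0;
}

With all eight names present in the DT, pp->num_vectors ends up as
8 * 32 = 256; with only "msi" it stays at 32, matching the note in the
commit message above.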
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 375f27ab9403..6cd1c2cc6d9e 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -194,6 +194,7 @@ struct qcom_pcie_ops {
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
+ unsigned int has_split_msi_irqs:1;
unsigned int pipe_clk_need_muxing:1;
unsigned int has_tbu_clk:1;
unsigned int has_ddrss_sf_tbu_clk:1;
@@ -209,6 +210,7 @@ struct qcom_pcie {
struct phy *phy;
struct gpio_desc *reset;
const struct qcom_pcie_cfg *cfg;
+ int msi_irqs[MAX_MSI_CTRLS];
};
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
@@ -1387,6 +1389,124 @@ static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
return 0;
}
+static void qcom_chained_msi_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int irq = irq_desc_get_irq(desc);
+ struct pcie_port *pp;
+ int i, pos;
+ unsigned long val;
+ u32 status, num_ctrls;
+ struct dw_pcie *pci;
+ struct qcom_pcie *pcie;
+
+ chained_irq_enter(chip, desc);
+
+ pp = irq_desc_get_handler_data(desc);
+ pci = to_dw_pcie_from_pp(pp);
+ pcie = to_qcom_pcie(pci);
+
+ /*
+ * Unlike generic dw_handle_msi_irq(), we can determine which group of
+ * MSIs triggered the IRQ, so process just that group.
+ */
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ for (i = 0; i < num_ctrls; i++) {
+ if (pcie->msi_irqs[i] == irq)
+ break;
+ }
+
+ if (WARN_ON_ONCE(unlikely(i == num_ctrls)))
+ goto out;
+
+ status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
+ (i * MSI_REG_CTRL_BLOCK_SIZE));
+ if (!status)
+ goto out;
+
+ val = status;
+ pos = 0;
+ while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
+ pos)) != MAX_MSI_IRQS_PER_CTRL) {
+ generic_handle_domain_irq(pp->irq_domain,
+ (i * MAX_MSI_IRQS_PER_CTRL) +
+ pos);
+ pos++;
+ }
+
+out:
+ chained_irq_exit(chip, desc);
+}
+
+static int qcom_pcie_msi_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ struct platform_device *pdev = to_platform_device(pci->dev);
+ char irq_name[] = "msiXXX";
+ unsigned int ctrl, num_ctrls;
+ int msi_irq, ret;
+
+ pp->msi_irq = -EINVAL;
+
+ /*
+ * We provide our own implementation of MSI init/deinit, but rely on
+ * using the rest of DWC MSI functionality.
+ */
+ pp->has_msi_ctrl = true;
+
+ msi_irq = platform_get_irq_byname_optional(pdev, "msi");
+ if (msi_irq < 0) {
+ msi_irq = platform_get_irq(pdev, 0);
+ if (msi_irq < 0)
+ return msi_irq;
+ }
+
+ pcie->msi_irqs[0] = msi_irq;
+
+ for (num_ctrls = 1; num_ctrls < MAX_MSI_CTRLS; num_ctrls++) {
+ snprintf(irq_name, sizeof(irq_name), "msi%d", num_ctrls + 1);
+ msi_irq = platform_get_irq_byname_optional(pdev, irq_name);
+ if (msi_irq == -ENXIO)
+ break;
+
+ pcie->msi_irqs[num_ctrls] = msi_irq;
+ }
+
+ pp->num_vectors = num_ctrls * MAX_MSI_IRQS_PER_CTRL;
+ dev_info(&pdev->dev, "Using %d MSI vectors\n", pp->num_vectors);
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++)
+ pp->irq_mask[ctrl] = ~0;
+
+ ret = dw_pcie_allocate_msi(pp);
+ if (ret)
+ return ret;
+
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++)
+ irq_set_chained_handler_and_data(pcie->msi_irqs[ctrl],
+ qcom_chained_msi_isr,
+ pp);
+
+ return 0;
+}
+
+static void qcom_pcie_msi_host_deinit(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ unsigned int ctrl, num_ctrls;
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++)
+ irq_set_chained_handler_and_data(pcie->msi_irqs[ctrl],
+ NULL,
+ NULL);
+
+ dw_pcie_free_msi(pp);
+}
+
static int qcom_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -1435,6 +1555,12 @@ static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
.host_init = qcom_pcie_host_init,
};
+static const struct dw_pcie_host_ops qcom_pcie_msi_dw_ops = {
+ .host_init = qcom_pcie_host_init,
+ .msi_host_init = qcom_pcie_msi_host_init,
+ .msi_host_deinit = qcom_pcie_msi_host_deinit,
+};
+
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
.get_resources = qcom_pcie_get_resources_2_1_0,
@@ -1508,6 +1634,7 @@ static const struct qcom_pcie_cfg ipq8064_cfg = {
static const struct qcom_pcie_cfg msm8996_cfg = {
.ops = &ops_2_3_2,
+ .has_split_msi_irqs = true,
};
static const struct qcom_pcie_cfg ipq8074_cfg = {
@@ -1520,6 +1647,7 @@ static const struct qcom_pcie_cfg ipq4019_cfg = {
static const struct qcom_pcie_cfg sdm845_cfg = {
.ops = &ops_2_7_0,
+ .has_split_msi_irqs = true,
.has_tbu_clk = true,
};
@@ -1532,12 +1660,14 @@ static const struct qcom_pcie_cfg sm8150_cfg = {
static const struct qcom_pcie_cfg sm8250_cfg = {
.ops = &ops_1_9_0,
+ .has_split_msi_irqs = true,
.has_tbu_clk = true,
.has_ddrss_sf_tbu_clk = true,
};
static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
.ops = &ops_1_9_0,
+ .has_split_msi_irqs = true,
.has_ddrss_sf_tbu_clk = true,
.pipe_clk_need_muxing = true,
.has_aggre0_clk = true,
@@ -1546,6 +1676,7 @@ static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
static const struct qcom_pcie_cfg sm8450_pcie1_cfg = {
.ops = &ops_1_9_0,
+ .has_split_msi_irqs = true,
.has_ddrss_sf_tbu_clk = true,
.pipe_clk_need_muxing = true,
.has_aggre1_clk = true,
@@ -1553,6 +1684,7 @@ static const struct qcom_pcie_cfg sm8450_pcie1_cfg = {
static const struct qcom_pcie_cfg sc7280_cfg = {
.ops = &ops_1_9_0,
+ .has_split_msi_irqs = true,
.has_tbu_clk = true,
.pipe_clk_need_muxing = true,
};
@@ -1626,7 +1758,10 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (ret)
goto err_pm_runtime_put;
- pp->ops = &qcom_pcie_dw_ops;
+ if (pcie->cfg->has_split_msi_irqs)
+ pp->ops = &qcom_pcie_msi_dw_ops;
+ else
+ pp->ops = &qcom_pcie_dw_ops;
ret = phy_init(pcie->phy);
if (ret) {
--
2.35.1
Thread overview: 10+ messages
2022-05-05 9:12 [PATCH v6 0/7] PCI: qcom: Fix higher MSI vectors handling Dmitry Baryshkov
2022-05-05 9:12 ` [PATCH v6 1/7] PCI: qcom: Revert "PCI: qcom: Add support for handling MSIs from 8 endpoints" Dmitry Baryshkov
2022-05-05 9:12 ` [PATCH v6 2/7] PCI: dwc: Correct msi_irq condition in dw_pcie_free_msi() Dmitry Baryshkov
2022-05-05 9:12 ` [PATCH v6 3/7] PCI: dwc: Add msi_host_deinit callback Dmitry Baryshkov
2022-05-05 9:12 ` [PATCH v6 4/7] PCI: dwc: Export several functions useful for MSI implentations Dmitry Baryshkov
2022-05-05 9:37 ` Stanimir Varbanov
2022-05-05 9:12 ` Dmitry Baryshkov [this message]
2022-05-05 9:49 ` [PATCH v6 5/7] PCI: qcom: Handle MSIs routed to multiple GIC interrupts Stanimir Varbanov
2022-05-05 9:12 ` [PATCH v6 6/7] dt-bindings: PCI: qcom: Support additional MSI interrupts Dmitry Baryshkov
2022-05-05 9:12 ` [PATCH v6 7/7] arm64: dts: qcom: sm8250: provide " Dmitry Baryshkov