From: "Pali Rohár" <pali@kernel.org>
To: "Thomas Gleixner" <tglx@linutronix.de>,
"Marc Zyngier" <maz@kernel.org>,
"Rob Herring" <robh+dt@kernel.org>,
"Bjorn Helgaas" <bhelgaas@google.com>,
"Andrew Lunn" <andrew@lunn.ch>,
"Gregory Clement" <gregory.clement@bootlin.com>,
"Sebastian Hesselbarth" <sebastian.hesselbarth@gmail.com>,
"Thomas Petazzoni" <thomas.petazzoni@bootlin.com>,
"Lorenzo Pieralisi" <lorenzo.pieralisi@arm.com>,
"Krzysztof Wilczyński" <kw@linux.com>,
"Marek Behún" <kabel@kernel.org>
Cc: linux-kernel@vger.kernel.org, devicetree@vger.kernel.org,
linux-pci@vger.kernel.org, linux-arm-kernel@lists.infradead.org
Subject: [PATCH 2/6] irqchip/armada-370-xp: Implement SoC Error interrupts
Date: Fri, 6 May 2022 15:40:25 +0200 [thread overview]
Message-ID: <20220506134029.21470-3-pali@kernel.org> (raw)
In-Reply-To: <20220506134029.21470-1-pali@kernel.org>
MPIC IRQ 4 is used as the SoC Error Summary interrupt and provides access to
another hierarchy of SoC Error interrupts. Implement a new IRQ chip and
domain for accessing this IRQ hierarchy.
Signed-off-by: Pali Rohár <pali@kernel.org>
---
drivers/irqchip/irq-armada-370-xp.c | 213 +++++++++++++++++++++++++++-
1 file changed, 210 insertions(+), 3 deletions(-)
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index ebd76ea1c69b..71578b65f5c8 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -117,6 +117,8 @@
/* Registers relative to main_int_base */
#define ARMADA_370_XP_INT_CONTROL (0x00)
#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x04)
+#define ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS (0x20)
+#define ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS (0x24)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
@@ -130,6 +132,8 @@
#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)
+#define ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF (0x50)
+#define ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF (0x54)
#define ARMADA_370_XP_INT_FABRIC_MASK_OFFS (0x54)
#define ARMADA_370_XP_INT_CAUSE_PERF(cpu) (1 << cpu)
@@ -146,6 +150,8 @@
static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
+static struct irq_domain *armada_370_xp_soc_err_domain;
+static unsigned int soc_err_irq_num_regs;
static u32 doorbell_mask_reg;
static int parent_irq;
#ifdef CONFIG_PCI_MSI
@@ -156,6 +162,8 @@ static DEFINE_MUTEX(msi_used_lock);
static phys_addr_t msi_doorbell_addr;
#endif
+static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
+
static inline bool is_percpu_irq(irq_hw_number_t irq)
{
if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
@@ -509,6 +517,27 @@ static void armada_xp_mpic_reenable_percpu(void)
armada_370_xp_irq_unmask(data);
}
+ /* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
+ for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
+ struct irq_data *data;
+ int virq;
+
+ virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
+ if (virq == 0)
+ continue;
+
+ data = irq_get_irq_data(virq);
+
+ if (!irq_percpu_is_enabled(virq))
+ continue;
+
+ armada_370_xp_soc_err_irq_unmask(data);
+ }
+
+ /* Unmask summary SoC Error Interrupt */
+ if (soc_err_irq_num_regs > 0)
+ writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
ipi_resume();
}
@@ -546,8 +575,8 @@ static struct irq_chip armada_370_xp_irq_chip = {
static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
unsigned int virq, irq_hw_number_t hw)
{
- /* IRQs 0 and 1 cannot be mapped, they are handled internally */
- if (hw <= 1)
+ /* IRQs 0, 1 and 4 cannot be mapped, they are handled internally */
+ if (hw <= 1 || hw == 4)
return -EINVAL;
armada_370_xp_irq_mask(irq_get_irq_data(virq));
@@ -577,6 +606,99 @@ static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
.xlate = irq_domain_xlate_onecell,
};
+static DEFINE_RAW_SPINLOCK(armada_370_xp_soc_err_lock);
+
+static void armada_370_xp_soc_err_irq_mask(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 reg, mask;
+
+ reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
+ : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
+
+ raw_spin_lock(&armada_370_xp_soc_err_lock);
+ mask = readl(per_cpu_int_base + reg);
+ mask &= ~BIT(hwirq % 32);
+ writel(mask, per_cpu_int_base + reg);
+ raw_spin_unlock(&armada_370_xp_soc_err_lock);
+}
+
+static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 reg, mask;
+
+ reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
+ : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
+
+ raw_spin_lock(&armada_370_xp_soc_err_lock);
+ mask = readl(per_cpu_int_base + reg);
+ mask |= BIT(hwirq % 32);
+ writel(mask, per_cpu_int_base + reg);
+ raw_spin_unlock(&armada_370_xp_soc_err_lock);
+}
+
+static int armada_370_xp_soc_err_irq_mask_on_cpu(void *par)
+{
+ struct irq_data *d = par;
+ armada_370_xp_soc_err_irq_mask(d);
+ return 0;
+}
+
+static int armada_370_xp_soc_err_irq_unmask_on_cpu(void *par)
+{
+ struct irq_data *d = par;
+ armada_370_xp_soc_err_irq_unmask(d);
+ return 0;
+}
+
+static int armada_xp_soc_err_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *mask,
+ bool force)
+{
+ unsigned int cpu;
+
+ cpus_read_lock();
+
+ /* First disable IRQ on all cores */
+ for_each_online_cpu(cpu)
+ smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_mask_on_cpu, d, true);
+
+ /* Select a single core from the affinity mask which is online */
+ cpu = cpumask_any_and(mask, cpu_online_mask);
+ smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_unmask_on_cpu, d, true);
+
+ cpus_read_unlock();
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip armada_370_xp_soc_err_irq_chip = {
+ .name = "MPIC SOC",
+ .irq_mask = armada_370_xp_soc_err_irq_mask,
+ .irq_unmask = armada_370_xp_soc_err_irq_unmask,
+ .irq_set_affinity = armada_xp_soc_err_irq_set_affinity,
+};
+
+static int armada_370_xp_soc_err_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ armada_370_xp_soc_err_irq_mask(irq_get_irq_data(virq));
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_percpu_devid(virq);
+ irq_set_chip_and_handler(virq, &armada_370_xp_soc_err_irq_chip,
+ handle_percpu_devid_irq);
+ irq_set_probe(virq);
+ return 0;
+}
+
+static const struct irq_domain_ops armada_370_xp_soc_err_irq_ops = {
+ .map = armada_370_xp_soc_err_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
#ifdef CONFIG_PCI_MSI
static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
{
@@ -605,6 +727,32 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
#endif
+static void armada_370_xp_handle_soc_err_irq(void)
+{
+ unsigned long status, bit;
+ u32 mask, cause;
+
+ if (soc_err_irq_num_regs < 1)
+ return;
+
+ mask = readl(per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF);
+ cause = readl(main_int_base + ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS);
+ status = cause & mask;
+
+ for_each_set_bit(bit, &status, 32)
+ generic_handle_domain_irq(armada_370_xp_soc_err_domain, bit);
+
+ if (soc_err_irq_num_regs < 2)
+ return;
+
+ mask = readl(per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF);
+ cause = readl(main_int_base + ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS);
+ status = cause & mask;
+
+ for_each_set_bit(bit, &status, 32)
+ generic_handle_domain_irq(armada_370_xp_soc_err_domain, bit + 32);
+}
+
static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -630,6 +778,11 @@ static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
continue;
}
+ if (irqn == 4) {
+ armada_370_xp_handle_soc_err_irq();
+ continue;
+ }
+
generic_handle_domain_irq(armada_370_xp_mpic_domain, irqn);
}
@@ -649,7 +802,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
if (irqnr > 1022)
break;
- if (irqnr > 1) {
+ if (irqnr > 1 && irqnr != 4) {
generic_handle_domain_irq(armada_370_xp_mpic_domain,
irqnr);
continue;
@@ -659,6 +812,10 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
if (irqnr == 1)
armada_370_xp_handle_msi_irq(regs, false);
+ /* SoC Error handling */
+ if (irqnr == 4)
+ armada_370_xp_handle_soc_err_irq();
+
#ifdef CONFIG_SMP
/* IPI Handling */
if (irqnr == 0) {
@@ -722,6 +879,26 @@ static void armada_370_xp_mpic_resume(void)
}
}
+ /* Re-enable per-CPU SoC Error interrupts */
+ for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
+ struct irq_data *data;
+ int virq;
+
+ virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
+ if (virq == 0)
+ continue;
+
+ data = irq_get_irq_data(virq);
+
+ /*
+ * Re-enable on the current CPU,
+ * armada_xp_mpic_reenable_percpu() will take
+ * care of secondary CPUs when they come up.
+ */
+ if (irq_percpu_is_enabled(virq))
+ armada_370_xp_soc_err_irq_unmask(data);
+ }
+
/* Reconfigure doorbells for IPIs and MSIs */
writel(doorbell_mask_reg,
per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
@@ -730,6 +907,10 @@ static void armada_370_xp_mpic_resume(void)
if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ /* Unmask summary SoC Error Interrupt */
+ if (soc_err_irq_num_regs > 0)
+ writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
ipi_resume();
}
@@ -742,6 +923,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
struct device_node *parent)
{
struct resource main_int_res, per_cpu_int_res;
+ struct device_node *soc_err_node;
int nr_irqs, i;
u32 control;
@@ -775,12 +957,37 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
BUG_ON(!armada_370_xp_mpic_domain);
irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);
+ soc_err_node = of_get_next_child(node, NULL);
+ if (!soc_err_node) {
+ pr_warn("Missing SoC Error Interrupt Controller node\n");
+ pr_warn("Extended interrupts are not supported\n");
+ } else {
+ pr_info("Registering MPIC SoC Error Interrupt Controller\n");
+ /*
+ * Armada 370 and XP have only 32 SoC Error IRQs in one register
+ * and other Armada platforms have 64 IRQs in two registers.
+ */
+ soc_err_irq_num_regs =
+ of_machine_is_compatible("marvell,armada-370-xp") ? 1 : 2;
+ armada_370_xp_soc_err_domain =
+ irq_domain_add_hierarchy(armada_370_xp_mpic_domain, 0,
+ soc_err_irq_num_regs * 32,
+ soc_err_node,
+ &armada_370_xp_soc_err_irq_ops,
+ NULL);
+ BUG_ON(!armada_370_xp_soc_err_domain);
+ }
+
/* Setup for the boot CPU */
armada_xp_mpic_perf_init();
armada_xp_mpic_smp_cpu_init();
armada_370_xp_msi_init(node, main_int_res.start);
+ /* Unmask summary SoC Error Interrupt */
+ if (soc_err_irq_num_regs > 0)
+ writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
parent_irq = irq_of_parse_and_map(node, 0);
if (parent_irq <= 0) {
irq_set_default_host(armada_370_xp_mpic_domain);
--
2.20.1
next prev parent reply other threads:[~2022-05-06 13:40 UTC|newest]
Thread overview: 19+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-05-06 13:40 [PATCH 0/6] PCI: mvebu: Add support for PME and AER interrupts Pali Rohár
2022-05-06 13:40 ` [PATCH 1/6] dt-bindings: irqchip: armada-370-xp: Update information about MPIC SoC Error Pali Rohár
2022-05-17 0:18 ` Rob Herring
2022-05-06 13:40 ` Pali Rohár [this message]
2022-05-06 18:19 ` [PATCH 2/6] irqchip/armada-370-xp: Implement SoC Error interrupts Marc Zyngier
2022-05-06 18:30 ` Pali Rohár
2022-05-06 18:47 ` Marc Zyngier
2022-05-06 18:55 ` Pali Rohár
2022-05-07 9:01 ` Marc Zyngier
2022-05-07 9:20 ` Pali Rohár
2022-05-07 9:42 ` Marc Zyngier
2022-05-07 11:15 ` Pali Rohár
2022-05-09 23:12 ` Rob Herring
2022-05-09 8:51 ` Thomas Gleixner
2022-05-06 13:40 ` [PATCH 3/6] ARM: dts: armada-38x.dtsi: Add node for MPIC SoC Error IRQ controller Pali Rohár
2022-05-06 13:40 ` [PATCH 4/6] dt-bindings: PCI: mvebu: Update information about summary interrupt Pali Rohár
2022-05-06 13:40 ` [PATCH 5/6] PCI: mvebu: Implement support for interrupts on emulated bridge Pali Rohár
2022-05-06 13:40 ` [PATCH 6/6] ARM: dts: armada-385.dtsi: Add definitions for PCIe summary interrupts Pali Rohár
2022-05-06 14:22 ` [PATCH 0/6] PCI: mvebu: Add support for PME and AER interrupts Pali Rohár
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220506134029.21470-3-pali@kernel.org \
--to=pali@kernel.org \
--cc=andrew@lunn.ch \
--cc=bhelgaas@google.com \
--cc=devicetree@vger.kernel.org \
--cc=gregory.clement@bootlin.com \
--cc=kabel@kernel.org \
--cc=kw@linux.com \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-pci@vger.kernel.org \
--cc=lorenzo.pieralisi@arm.com \
--cc=maz@kernel.org \
--cc=robh+dt@kernel.org \
--cc=sebastian.hesselbarth@gmail.com \
--cc=tglx@linutronix.de \
--cc=thomas.petazzoni@bootlin.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).