From: Anup Patel <anup@brainfault.org>
To: Palmer Dabbelt <palmer@sifive.com>,
Albert Ou <aou@eecs.berkeley.edu>,
Daniel Lezcano <daniel.lezcano@linaro.org>,
Thomas Gleixner <tglx@linutronix.de>,
Jason Cooper <jason@lakedaemon.net>,
Marc Zyngier <marc.zyngier@arm.com>
Cc: Atish Patra <atish.patra@wdc.com>,
Christoph Hellwig <hch@infradead.org>,
linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org,
Anup Patel <anup@brainfault.org>
Subject: [PATCH v5 1/5] irqchip: sifive-plic: Pre-compute context hart base and enable base
Date: Sat, 19 Jan 2019 11:26:21 +0530
Message-ID: <20190119055625.100054-2-anup@brainfault.org>
In-Reply-To: <20190119055625.100054-1-anup@brainfault.org>
This patch does the following optimizations:
1. Pre-compute the hart base for each context handler
2. Pre-compute the enable base for each context handler
3. Use a per-context-handler enable lock instead of the
   global plic_toggle_lock
Signed-off-by: Anup Patel <anup@brainfault.org>
---
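Note (not part of the patch itself): below is a minimal, standalone userspace
sketch of the address pre-computation this patch moves into plic_init(). The
offset values follow the standard SiFive PLIC memory map used by this driver
(enables at 0x2000, 0x80 per context; context registers at 0x200000, 0x1000
per context); the PLIC base address, context count, and hwirq below are just
example values, not taken from any particular board.

	#include <stdint.h>
	#include <stdio.h>

	/* Offsets following the SiFive PLIC layout used by this driver. */
	#define ENABLE_BASE       0x2000
	#define ENABLE_PER_HART   0x80
	#define CONTEXT_BASE      0x200000
	#define CONTEXT_PER_HART  0x1000

	int main(void)
	{
		/* Example values only: a typical PLIC base and two contexts. */
		uintptr_t plic_regs = 0x0c000000;
		int hwirq = 10;

		for (int ctx = 0; ctx < 2; ctx++) {
			/* Pre-computed once per context handler at init time... */
			uintptr_t hart_base   = plic_regs + CONTEXT_BASE + ctx * CONTEXT_PER_HART;
			uintptr_t enable_base = plic_regs + ENABLE_BASE + ctx * ENABLE_PER_HART;

			/* ...so the toggle path only adds the per-IRQ word offset. */
			uintptr_t enable_reg = enable_base + (hwirq / 32) * sizeof(uint32_t);

			printf("ctx %d: hart_base=%#lx enable_reg=%#lx\n",
			       ctx, (unsigned long)hart_base, (unsigned long)enable_reg);
		}
		return 0;
	}
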
drivers/irqchip/irq-sifive-plic.c | 47 ++++++++++++++-----------------
1 file changed, 21 insertions(+), 26 deletions(-)
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 357e9daf94ae..c23a293a2aae 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -59,37 +59,28 @@ static void __iomem *plic_regs;
struct plic_handler {
bool present;
- int ctxid;
+ void __iomem *hart_base;
+ /*
+ * Protect mask operations on the registers given that we can't
+ * assume atomic memory operations work on them.
+ */
+ raw_spinlock_t enable_lock;
+ void __iomem *enable_base;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
-static inline void __iomem *plic_hart_offset(int ctxid)
-{
- return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
-}
-
-static inline u32 __iomem *plic_enable_base(int ctxid)
-{
- return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
-}
-
-/*
- * Protect mask operations on the registers given that we can't assume that
- * atomic memory operations work on them.
- */
-static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
-
-static inline void plic_toggle(int ctxid, int hwirq, int enable)
+static inline void plic_toggle(struct plic_handler *handler,
+ int hwirq, int enable)
{
- u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+ u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
u32 hwirq_mask = 1 << (hwirq % 32);
- raw_spin_lock(&plic_toggle_lock);
+ raw_spin_lock(&handler->enable_lock);
if (enable)
writel(readl(reg) | hwirq_mask, reg);
else
writel(readl(reg) & ~hwirq_mask, reg);
- raw_spin_unlock(&plic_toggle_lock);
+ raw_spin_unlock(&handler->enable_lock);
}
static inline void plic_irq_toggle(struct irq_data *d, int enable)
@@ -101,7 +92,7 @@ static inline void plic_irq_toggle(struct irq_data *d, int enable)
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present)
- plic_toggle(handler->ctxid, d->hwirq, enable);
+ plic_toggle(handler, d->hwirq, enable);
}
}
@@ -150,7 +141,7 @@ static struct irq_domain *plic_irqdomain;
static void plic_handle_irq(struct pt_regs *regs)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
- void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+ void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
irq_hw_number_t hwirq;
WARN_ON_ONCE(!handler->present);
@@ -239,12 +230,16 @@ static int __init plic_init(struct device_node *node,
cpu = riscv_hartid_to_cpuid(hartid);
handler = per_cpu_ptr(&plic_handlers, cpu);
handler->present = true;
- handler->ctxid = i;
+ handler->hart_base =
+ plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+ raw_spin_lock_init(&handler->enable_lock);
+ handler->enable_base =
+ plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
/* priority must be > threshold to trigger an interrupt */
- writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+ writel(0, handler->hart_base + CONTEXT_THRESHOLD);
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
- plic_toggle(i, hwirq, 0);
+ plic_toggle(handler, hwirq, 0);
nr_mapped++;
}
--
2.17.1