From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
To: linux-kernel@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>,
Jason Cooper <jason@lakedaemon.net>,
Marc Zyngier <marc.zyngier@arm.com>,
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Subject: [PATCH 2/2] irqchip/gic-v3-its: Move ITS' ->pend_page allocation into an early CPU up callback
Date: Wed, 18 Jul 2018 17:42:05 +0200 [thread overview]
Message-ID: <20180718154205.13704-3-bigeasy@linutronix.de> (raw)
In-Reply-To: <20180718154205.13704-1-bigeasy@linutronix.de>
The AP-GIC-starting callback allocates memory for the ->pend_page while
the CPU is started during boot-up. This callback is invoked on the
target CPU with disabled interrupts.
This does not work on -RT because memory allocations are not possible
with disabled interrupts.
Move the memory allocation to an earlier hotplug step which is invoked
with enabled interrupts on the boot CPU.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/irqchip/irq-gic-v3-its.c | 60 ++++++++++++++++++++++----------
1 file changed, 41 insertions(+), 19 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index a616043d25ee..acc3d44c356d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -171,6 +171,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock);
static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
+#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
@@ -1853,15 +1854,17 @@ static int its_alloc_collections(struct its_node *its)
return 0;
}
-static struct page *its_allocate_pending_table(gfp_t gfp_flags)
+static struct page *its_allocate_pending_table(unsigned int cpu)
{
struct page *pend_page;
+ unsigned int order;
/*
* The pending pages have to be at least 64kB aligned,
* hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
*/
- pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
- get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+ order = get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K));
+ pend_page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_ZERO,
+ order);
if (!pend_page)
return NULL;
@@ -1877,6 +1880,28 @@ static void its_free_pending_table(struct page *pt)
get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
}
+static int its_alloc_pend_page(unsigned int cpu)
+{
+ struct page *pend_page;
+ phys_addr_t paddr;
+
+ pend_page = gic_data_rdist_cpu(cpu)->pend_page;
+ if (pend_page)
+ return 0;
+
+ pend_page = its_allocate_pending_table(cpu);
+ if (!pend_page) {
+ pr_err("Failed to allocate PENDBASE for CPU%d\n",
+ smp_processor_id());
+ return -ENOMEM;
+ }
+
+ paddr = page_to_phys(pend_page);
+ pr_info("CPU%d: using LPI pending table @%pa\n", cpu, &paddr);
+ gic_data_rdist_cpu(cpu)->pend_page = pend_page;
+ return 0;
+}
+
static void its_cpu_init_lpis(void)
{
void __iomem *rbase = gic_data_rdist_rd_base();
@@ -1885,21 +1910,8 @@ static void its_cpu_init_lpis(void)
/* If we didn't allocate the pending table yet, do it now */
pend_page = gic_data_rdist()->pend_page;
- if (!pend_page) {
- phys_addr_t paddr;
-
- pend_page = its_allocate_pending_table(GFP_NOWAIT);
- if (!pend_page) {
- pr_err("Failed to allocate PENDBASE for CPU%d\n",
- smp_processor_id());
- return;
- }
-
- paddr = page_to_phys(pend_page);
- pr_info("CPU%d: using LPI pending table @%pa\n",
- smp_processor_id(), &paddr);
- gic_data_rdist()->pend_page = pend_page;
- }
+ if (WARN_ON(!pend_page))
+ return;
/* set PROPBASE */
val = (page_to_phys(gic_rdists->prop_page) |
@@ -2732,7 +2744,7 @@ static int its_vpe_init(struct its_vpe *vpe)
return vpe_id;
/* Allocate VPT */
- vpt_page = its_allocate_pending_table(GFP_KERNEL);
+ vpt_page = its_allocate_pending_table(raw_smp_processor_id());
if (!vpt_page) {
its_vpe_id_free(vpe_id);
return -ENOMEM;
@@ -3706,6 +3718,16 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
if (err)
return err;
+ err = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "irqchip/arm/gicv3:prepare",
+ its_alloc_pend_page, NULL);
+ if (err < 0) {
+ pr_warn("ITS: Can't register CPU-hotplug callback.\n");
+ return err;
+ }
+ err = its_alloc_pend_page(smp_processor_id());
+ if (err < 0)
+ return err;
+
list_for_each_entry(its, &its_nodes, entry)
has_v4 |= its->is_v4;
--
2.18.0
next prev parent reply other threads:[~2018-07-18 15:42 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-07-18 15:42 [PATCH 0/2] irqchip/gic-v3-its: a raw_spinlock_t + early ->pend_page allocation Sebastian Andrzej Siewior
2018-07-18 15:42 ` [PATCH 1/2] irqchip/gic-v3-its: Make its_lock a raw_spin_lock_t Sebastian Andrzej Siewior
2018-08-06 8:17 ` Marc Zyngier
2018-07-18 15:42 ` Sebastian Andrzej Siewior [this message]
2018-08-06 8:15 ` [PATCH 2/2] irqchip/gic-v3-its: Move ITS' ->pend_page allocation into an early CPU up callback Marc Zyngier
2018-08-29 16:49 ` Sebastian Andrzej Siewior
2018-08-31 19:32 ` Sebastian Andrzej Siewior
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20180718154205.13704-3-bigeasy@linutronix.de \
--to=bigeasy@linutronix.de \
--cc=jason@lakedaemon.net \
--cc=linux-kernel@vger.kernel.org \
--cc=marc.zyngier@arm.com \
--cc=tglx@linutronix.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).