From: Johan Hovold <johan+linaro@kernel.org>
To: Marc Zyngier <maz@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>,
x86@kernel.org, platform-driver-x86@vger.kernel.org,
linux-arm-kernel@lists.infradead.org, linux-mips@vger.kernel.org,
linux-kernel@vger.kernel.org,
Johan Hovold <johan+linaro@kernel.org>
Subject: [PATCH v3 19/19] irqdomain: Switch to per-domain locking
Date: Fri, 9 Dec 2022 15:01:50 +0100 [thread overview]
Message-ID: <20221209140150.1453-20-johan+linaro@kernel.org> (raw)
In-Reply-To: <20221209140150.1453-1-johan+linaro@kernel.org>
The IRQ domain structures are currently protected by the global
irq_domain_mutex. Switch to using more fine-grained per-domain locking,
which may potentially speed up parallel probing somewhat.
Note that the domain lock of the root domain (innermost domain) must be
used for hierarchical domains. For non-hierarchical domains (as for root
domains), the new root pointer is set to the domain itself so that
domain->root->mutex can be used in shared code paths.
Also note that hierarchical domains should be constructed using
irq_domain_create_hierarchy() (or irq_domain_add_hierarchy()) to avoid
poking at irqdomain internals.
Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
---
include/linux/irqdomain.h | 4 ++++
kernel/irq/irqdomain.c | 48 ++++++++++++++++++++++-----------------
2 files changed, 31 insertions(+), 21 deletions(-)
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 16399de00b48..cad47737a052 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -125,6 +125,8 @@ struct irq_domain_chip_generic;
* core code.
* @flags: Per irq_domain flags
* @mapcount: The number of mapped interrupts
+ * @mutex: Domain lock, hierarchical domains use root domain's lock
+ * @root: Pointer to root domain, or containing structure if non-hierarchical
*
* Optional elements:
* @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy
@@ -152,6 +154,8 @@ struct irq_domain {
void *host_data;
unsigned int flags;
unsigned int mapcount;
+ struct mutex mutex;
+ struct irq_domain *root;
/* Optional data */
struct fwnode_handle *fwnode;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 6f2b8a1248e1..3faea8b66120 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -217,6 +217,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int s
/* Fill structure */
INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
+ mutex_init(&domain->mutex);
domain->ops = ops;
domain->host_data = host_data;
domain->hwirq_max = hwirq_max;
@@ -227,6 +228,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int s
domain->revmap_size = size;
irq_domain_check_hierarchy(domain);
+ domain->root = domain;
mutex_lock(&irq_domain_mutex);
debugfs_add_domain_dir(domain);
@@ -503,7 +505,7 @@ static bool irq_domain_is_nomap(struct irq_domain *domain)
static void irq_domain_clear_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
- lockdep_assert_held(&irq_domain_mutex);
+ lockdep_assert_held(&domain->root->mutex);
if (irq_domain_is_nomap(domain))
return;
@@ -518,7 +520,7 @@ static void irq_domain_set_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq,
struct irq_data *irq_data)
{
- lockdep_assert_held(&irq_domain_mutex);
+ lockdep_assert_held(&domain->root->mutex);
if (irq_domain_is_nomap(domain))
return;
@@ -540,7 +542,7 @@ static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
hwirq = irq_data->hwirq;
- mutex_lock(&irq_domain_mutex);
+ mutex_lock(&domain->mutex);
irq_set_status_flags(irq, IRQ_NOREQUEST);
@@ -562,7 +564,7 @@ static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
/* Clear reverse map for this hwirq */
irq_domain_clear_mapping(domain, hwirq);
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->mutex);
}
static int __irq_domain_associate(struct irq_domain *domain, unsigned int virq,
@@ -612,9 +614,9 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
{
int ret;
- mutex_lock(&irq_domain_mutex);
+ mutex_lock(&domain->mutex);
ret = __irq_domain_associate(domain, virq, hwirq);
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->mutex);
return ret;
}
@@ -731,7 +733,7 @@ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
return 0;
}
- mutex_lock(&irq_domain_mutex);
+ mutex_lock(&domain->mutex);
/* Check if mapping already exists */
virq = irq_find_mapping(domain, hwirq);
@@ -742,7 +744,7 @@ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
virq = __irq_create_mapping_affinity(domain, hwirq, affinity);
out:
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->mutex);
return virq;
}
@@ -811,7 +813,7 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
type &= IRQ_TYPE_SENSE_MASK;
- mutex_lock(&irq_domain_mutex);
+ mutex_lock(&domain->root->mutex);
/*
* If we've already configured this interrupt,
@@ -864,11 +866,11 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
/* Store trigger type */
irqd_set_trigger_type(irq_data, type);
out:
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->root->mutex);
return virq;
err:
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->root->mutex);
return 0;
}
@@ -1132,6 +1134,7 @@ struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
else
domain = irq_domain_create_tree(fwnode, ops, host_data);
if (domain) {
+ domain->root = parent->root;
domain->parent = parent;
domain->flags |= flags;
}
@@ -1528,10 +1531,10 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
return -EINVAL;
}
- mutex_lock(&irq_domain_mutex);
+ mutex_lock(&domain->root->mutex);
ret = ___irq_domain_alloc_irqs(domain, irq_base, nr_irqs, node, arg,
realloc, affinity);
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->root->mutex);
return ret;
}
@@ -1542,7 +1545,7 @@ static void irq_domain_fix_revmap(struct irq_data *d)
{
void __rcu **slot;
- lockdep_assert_held(&irq_domain_mutex);
+ lockdep_assert_held(&d->domain->root->mutex);
if (irq_domain_is_nomap(d->domain))
return;
@@ -1608,7 +1611,7 @@ int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
if (!parent_irq_data)
return -ENOMEM;
- mutex_lock(&irq_domain_mutex);
+ mutex_lock(&domain->root->mutex);
/* Copy the original irq_data. */
*parent_irq_data = *irq_data;
@@ -1636,7 +1639,7 @@ int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
irq_domain_fix_revmap(parent_irq_data);
irq_domain_set_mapping(domain, irq_data->hwirq, irq_data);
error:
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->root->mutex);
return rv;
}
@@ -1691,7 +1694,7 @@ int irq_domain_pop_irq(struct irq_domain *domain, int virq)
if (WARN_ON(!parent_irq_data))
return -EINVAL;
- mutex_lock(&irq_domain_mutex);
+ mutex_lock(&domain->root->mutex);
irq_data->parent_data = NULL;
@@ -1703,7 +1706,7 @@ int irq_domain_pop_irq(struct irq_domain *domain, int virq)
irq_domain_fix_revmap(irq_data);
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->root->mutex);
kfree(parent_irq_data);
@@ -1719,17 +1722,20 @@ EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *data = irq_get_irq_data(virq);
+ struct irq_domain *domain;
int i;
if (WARN(!data || !data->domain || !data->domain->ops->free,
"NULL pointer, cannot free irq\n"))
return;
- mutex_lock(&irq_domain_mutex);
+ domain = data->domain;
+
+ mutex_lock(&domain->root->mutex);
for (i = 0; i < nr_irqs; i++)
irq_domain_remove_irq(virq + i);
- irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
- mutex_unlock(&irq_domain_mutex);
+ irq_domain_free_irqs_hierarchy(domain, virq, nr_irqs);
+ mutex_unlock(&domain->root->mutex);
irq_domain_free_irq_data(virq, nr_irqs);
irq_free_descs(virq, nr_irqs);
--
2.37.4
next prev parent reply other threads:[~2022-12-09 14:06 UTC|newest]
Thread overview: 41+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-12-09 14:01 [PATCH v3 00/19] irqdomain: fix mapping race and clean up locking Johan Hovold
2022-12-09 14:01 ` [PATCH v3 01/19] irqdomain: Drop bogus fwspec-mapping error handling Johan Hovold
2022-12-09 14:01 ` [PATCH v3 02/19] irqdomain: Drop dead domain-name assignment Johan Hovold
2022-12-09 14:01 ` [PATCH v3 03/19] irqdomain: Drop leftover brackets Johan Hovold
2022-12-12 22:44 ` Philippe Mathieu-Daudé
2022-12-09 14:01 ` [PATCH v3 04/19] irqdomain: Fix association race Johan Hovold
2022-12-09 14:01 ` [PATCH v3 05/19] irqdomain: Fix disassociation race Johan Hovold
2022-12-09 14:01 ` [PATCH v3 06/19] irqdomain: Drop revmap mutex Johan Hovold
2022-12-09 14:01 ` [PATCH v3 07/19] irqdomain: Look for existing mapping only once Johan Hovold
2022-12-09 14:01 ` [PATCH v3 08/19] irqdomain: Refactor __irq_domain_alloc_irqs() Johan Hovold
2022-12-09 14:01 ` [PATCH v3 09/19] irqdomain: Fix mapping-creation race Johan Hovold
2022-12-09 14:01 ` [PATCH v3 10/19] irqdomain: Clean up irq_domain_push/pop_irq() Johan Hovold
2022-12-12 22:32 ` Philippe Mathieu-Daudé
2022-12-09 14:01 ` [PATCH v3 11/19] x86/ioapic: Use irq_domain_create_hierarchy() Johan Hovold
2022-12-12 22:37 ` Philippe Mathieu-Daudé
2022-12-09 14:01 ` [PATCH v3 12/19] x86/apic: " Johan Hovold
2022-12-12 22:41 ` Philippe Mathieu-Daudé
2022-12-09 14:01 ` [PATCH v3 13/19] irqchip/alpine-msi: Use irq_domain_add_hierarchy() Johan Hovold
2022-12-12 22:41 ` Philippe Mathieu-Daudé
2022-12-09 14:01 ` [PATCH v3 14/19] irqchip/gic-v2m: Use irq_domain_create_hierarchy() Johan Hovold
2022-12-12 22:42 ` Philippe Mathieu-Daudé
2022-12-09 14:01 ` [PATCH v3 15/19] irqchip/gic-v3-its: " Johan Hovold
2022-12-09 14:01 ` [PATCH v3 16/19] irqchip/gic-v3-mbi: " Johan Hovold
2022-12-12 22:42 ` Philippe Mathieu-Daudé
2022-12-09 14:01 ` [PATCH v3 17/19] irqchip/loongson-pch-msi: " Johan Hovold
2022-12-12 22:36 ` Philippe Mathieu-Daudé
2022-12-09 14:01 ` [PATCH v3 18/19] irqchip/mvebu-odmi: " Johan Hovold
2022-12-12 22:37 ` Philippe Mathieu-Daudé
2022-12-09 14:01 ` Johan Hovold [this message]
2022-12-12 14:14 ` [PATCH v3 19/19] irqdomain: Switch to per-domain locking Thomas Gleixner
2022-12-12 14:29 ` Johan Hovold
2022-12-12 16:18 ` Thomas Gleixner
2023-01-11 18:28 ` Thomas Gleixner
2023-01-12 17:21 ` Johan Hovold
2022-12-09 15:51 ` [PATCH v3 00/19] irqdomain: fix mapping race and clean up locking Thomas Gleixner
2022-12-09 16:16 ` Johan Hovold
2022-12-09 19:53 ` Thomas Gleixner
2022-12-15 9:31 ` Hsin-Yi Wang
2023-01-16 13:53 ` Johan Hovold
2022-12-20 3:30 ` [PATCH v3 0/19] " Mark-PK Tsai
2023-01-16 13:55 ` Johan Hovold
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221209140150.1453-20-johan+linaro@kernel.org \
--to=johan+linaro@kernel.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mips@vger.kernel.org \
--cc=maz@kernel.org \
--cc=platform-driver-x86@vger.kernel.org \
--cc=tglx@linutronix.de \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).