From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: "Juergen Gross" <jgross@suse.com>,
"Andrew Cooper" <andrew.cooper3@citrix.com>,
"Wei Liu" <wl@xen.org>, "Jan Beulich" <jbeulich@suse.com>,
"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [Xen-devel] [PATCH 8/8] xen/x86: use keyhandler locks when dumping data to console
Date: Thu, 13 Feb 2020 13:54:49 +0100 [thread overview]
Message-ID: <20200213125449.14226-9-jgross@suse.com> (raw)
In-Reply-To: <20200213125449.14226-1-jgross@suse.com>
Instead of using the normal locks, use the keyhandler-provided trylocks
with timeouts.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
xen/arch/x86/io_apic.c | 53 +++++++++++++++++++++++++++++++++++++-------------
xen/arch/x86/irq.c | 5 ++++-
xen/arch/x86/msi.c | 4 +++-
xen/arch/x86/numa.c | 16 +++++++++------
4 files changed, 57 insertions(+), 21 deletions(-)
diff --git a/xen/arch/x86/io_apic.c b/xen/arch/x86/io_apic.c
index e98e08e9c8..4acdc566b9 100644
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -1098,6 +1098,18 @@ static inline void UNEXPECTED_IO_APIC(void)
{
}
+static bool get_ioapic_lock(unsigned long *flags, bool boot)
+{
+ if ( boot )
+ {
+ spin_lock_irqsave(&ioapic_lock, *flags);
+ return true;
+ }
+
+ return keyhandler_spin_lock_irqsave(&ioapic_lock, flags,
+ "could not get ioapic lock");
+}
+
static void /*__init*/ __print_IO_APIC(bool boot)
{
int apic, i;
@@ -1125,13 +1137,16 @@ static void /*__init*/ __print_IO_APIC(bool boot)
if (!nr_ioapic_entries[apic])
continue;
- spin_lock_irqsave(&ioapic_lock, flags);
+ if ( !get_ioapic_lock(&flags, boot) )
+ continue;
+
reg_00.raw = io_apic_read(apic, 0);
reg_01.raw = io_apic_read(apic, 1);
if (reg_01.bits.version >= 0x10)
reg_02.raw = io_apic_read(apic, 2);
if (reg_01.bits.version >= 0x20)
reg_03.raw = io_apic_read(apic, 3);
+
spin_unlock_irqrestore(&ioapic_lock, flags);
printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
@@ -1201,7 +1216,12 @@ static void /*__init*/ __print_IO_APIC(bool boot)
for (i = 0; i <= reg_01.bits.entries; i++) {
struct IO_APIC_route_entry entry;
- entry = ioapic_read_entry(apic, i, 0);
+ if ( !get_ioapic_lock(&flags, boot) )
+ continue;
+
+ entry = __ioapic_read_entry(apic, i, 0);
+
+ spin_unlock_irqrestore(&ioapic_lock, flags);
if ( x2apic_enabled && iommu_intremap )
printk(KERN_DEBUG " %02x %08x", i, entry.dest.dest32);
@@ -2495,21 +2515,28 @@ void dump_ioapic_irq_info(void)
for ( ; ; )
{
+ unsigned long flags;
+
pin = entry->pin;
printk(" Apic 0x%02x, Pin %2d: ", entry->apic, pin);
- rte = ioapic_read_entry(entry->apic, pin, 0);
-
- printk("vec=%02x delivery=%-5s dest=%c status=%d "
- "polarity=%d irr=%d trig=%c mask=%d dest_id:%0*x\n",
- rte.vector, delivery_mode_2_str(rte.delivery_mode),
- rte.dest_mode ? 'L' : 'P',
- rte.delivery_status, rte.polarity, rte.irr,
- rte.trigger ? 'L' : 'E', rte.mask,
- (x2apic_enabled && iommu_intremap) ? 8 : 2,
- (x2apic_enabled && iommu_intremap) ?
- rte.dest.dest32 : rte.dest.logical.logical_dest);
+ if ( keyhandler_spin_lock_irqsave(&ioapic_lock, &flags,
+ "could not get ioapic lock") )
+ {
+ rte = __ioapic_read_entry(entry->apic, pin, 0);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ printk("vec=%02x delivery=%-5s dest=%c status=%d "
+ "polarity=%d irr=%d trig=%c mask=%d dest_id:%0*x\n",
+ rte.vector, delivery_mode_2_str(rte.delivery_mode),
+ rte.dest_mode ? 'L' : 'P',
+ rte.delivery_status, rte.polarity, rte.irr,
+ rte.trigger ? 'L' : 'E', rte.mask,
+ (x2apic_enabled && iommu_intremap) ? 8 : 2,
+ (x2apic_enabled && iommu_intremap) ?
+ rte.dest.dest32 : rte.dest.logical.logical_dest);
+ }
if ( entry->next == 0 )
break;
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index cc2eb8e925..f3d931b121 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -2470,7 +2470,9 @@ static void dump_irqs(unsigned char key)
ssid = in_irq() ? NULL : xsm_show_irq_sid(irq);
- spin_lock_irqsave(&desc->lock, flags);
+ if ( !keyhandler_spin_lock_irqsave(&desc->lock, &flags,
+ "could not get irq lock") )
+ goto free_ssid;
printk(" IRQ:%4d vec:%02x %-15s status=%03x aff:{%*pbl}/{%*pbl} ",
irq, desc->arch.vector, desc->handler->typename, desc->status,
@@ -2506,6 +2508,7 @@ static void dump_irqs(unsigned char key)
spin_unlock_irqrestore(&desc->lock, flags);
+ free_ssid:
xfree(ssid);
}
diff --git a/xen/arch/x86/msi.c b/xen/arch/x86/msi.c
index c85cf9f85a..d10b856179 100644
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -1470,7 +1470,9 @@ static void dump_msi(unsigned char key)
if ( !irq_desc_initialized(desc) )
continue;
- spin_lock_irqsave(&desc->lock, flags);
+ if ( !keyhandler_spin_lock_irqsave(&desc->lock, &flags,
+ "could not get irq lock") )
+ continue;
entry = desc->msi_desc;
if ( !entry )
diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index 6ef15b34d5..d21ed8737f 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -425,18 +425,22 @@ static void dump_numa(unsigned char key)
for_each_online_node ( i )
page_num_node[i] = 0;
- spin_lock(&d->page_alloc_lock);
- page_list_for_each(page, &d->page_list)
+ if ( keyhandler_spin_lock(&d->page_alloc_lock,
+ "could not get page_alloc lock") )
{
- i = phys_to_nid(page_to_maddr(page));
- page_num_node[i]++;
+ page_list_for_each(page, &d->page_list)
+ {
+ i = phys_to_nid(page_to_maddr(page));
+ page_num_node[i]++;
+ }
+ spin_unlock(&d->page_alloc_lock);
}
- spin_unlock(&d->page_alloc_lock);
for_each_online_node ( i )
printk(" Node %u: %u\n", i, page_num_node[i]);
- if ( !read_trylock(&d->vnuma_rwlock) )
+ if ( !keyhandler_read_lock(&d->vnuma_rwlock,
+ "could not get vnuma lock") )
continue;
if ( !d->vnuma )
--
2.16.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
next prev parent reply other threads:[~2020-02-13 12:55 UTC|newest]
Thread overview: 26+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-02-13 12:54 [Xen-devel] [PATCH 0/8] xen: don't let keyhandlers block indefinitely on locks Juergen Gross
2020-02-13 12:54 ` [Xen-devel] [PATCH 1/8] xen: make rangeset_printk() static Juergen Gross
2020-02-13 14:00 ` Jan Beulich
2020-02-13 12:54 ` [Xen-devel] [PATCH 2/8] xen: add using domlist_read_lock in keyhandlers Juergen Gross
2020-02-13 14:01 ` Jan Beulich
2020-02-13 14:09 ` George Dunlap
2020-02-18 5:42 ` Tian, Kevin
2020-02-13 12:54 ` [Xen-devel] [PATCH 3/8] xen/sched: don't use irqsave locks in dumping functions Juergen Gross
2020-02-19 12:40 ` Dario Faggioli
2020-02-19 14:27 ` Jan Beulich
2020-02-19 15:02 ` Jürgen Groß
2020-02-19 15:47 ` Dario Faggioli
2020-02-13 12:54 ` [Xen-devel] [PATCH 4/8] xen: add locks with timeouts for keyhandlers Juergen Gross
2020-03-05 15:25 ` Jan Beulich
2020-03-06 8:08 ` Jürgen Groß
2020-03-06 8:15 ` Jürgen Groß
2020-02-13 12:54 ` [Xen-devel] [PATCH 5/8] xen/sched: use keyhandler locks when dumping data to console Juergen Gross
2020-02-19 14:31 ` Dario Faggioli
2020-02-19 15:09 ` Jürgen Groß
2020-02-13 12:54 ` [Xen-devel] [PATCH 6/8] xen/common: " Juergen Gross
2020-02-13 12:54 ` [Xen-devel] [PATCH 7/8] xen/drivers: " Juergen Gross
2020-02-13 12:54 ` Juergen Gross [this message]
2020-02-13 18:38 ` [Xen-devel] [PATCH 0/8] xen: don't let keyhandlers block indefinitely on locks Andrew Cooper
2020-02-14 6:05 ` Jürgen Groß
2020-02-14 9:37 ` Jan Beulich
2020-02-19 12:14 ` Julien Grall
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200213125449.14226-9-jgross@suse.com \
--to=jgross@suse.com \
--cc=andrew.cooper3@citrix.com \
--cc=jbeulich@suse.com \
--cc=roger.pau@citrix.com \
--cc=wl@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).