From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
George Dunlap <george.dunlap@eu.citrix.com>,
Meng Xu <mengxu@cis.upenn.edu>,
Dario Faggioli <dfaggioli@suse.com>
Subject: [Xen-devel] [PATCH 3/8] xen/sched: don't use irqsave locks in dumping functions
Date: Thu, 13 Feb 2020 13:54:44 +0100
Message-ID: <20200213125449.14226-4-jgross@suse.com>
In-Reply-To: <20200213125449.14226-1-jgross@suse.com>
All dumping functions invoked by the "runq" keyhandler are called with
interrupts disabled, so there is no need to use the irqsave variants
of any locks in those functions.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
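Illustration only, not part of the patch: a minimal, self-contained
user-space sketch of the calling pattern this change relies on. The
caller (the "runq" keyhandler path) already runs with interrupts
disabled, so the dump helpers can take the plain lock variant instead
of saving/restoring the interrupt flags themselves. Every name below
(toy_*, fake_irqs_enabled) is invented for this sketch and does not
exist in the Xen tree.

/*
 * Toy model: interrupt state is a global flag, "irqsave" just records
 * and clears it, and the dump helper relies on the caller having done
 * that already.
 */
#include <stdbool.h>
#include <stdio.h>

static bool fake_irqs_enabled = true;         /* stand-in for the CPU IRQ flag */

static unsigned long toy_irq_save(void)
{
    unsigned long flags = fake_irqs_enabled;  /* remember previous state */

    fake_irqs_enabled = false;                /* "disable interrupts" */
    return flags;
}

static void toy_irq_restore(unsigned long flags)
{
    fake_irqs_enabled = flags;
}

/*
 * Dump helper after the patch: the caller guarantees interrupts are
 * already off, so a plain lock/unlock pair (elided here) is sufficient.
 */
static void toy_dump(void)
{
    /* spin_lock(&prv->lock); */
    printf("dumping with IRQs %s\n", fake_irqs_enabled ? "on" : "off");
    /* spin_unlock(&prv->lock); */
}

/* Model of the keyhandler: interrupts stay off around all dumping. */
static void toy_keyhandler(void)
{
    unsigned long flags = toy_irq_save();

    toy_dump();                               /* IRQs are off for the whole call */
    toy_irq_restore(flags);
}

int main(void)
{
    toy_keyhandler();
    return 0;
}

Compiled stand-alone this prints "dumping with IRQs off", mirroring why
the irqsave/irqrestore pairs removed below are redundant.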
xen/common/sched/credit.c | 10 ++++------
xen/common/sched/credit2.c | 5 ++---
xen/common/sched/null.c | 10 ++++------
xen/common/sched/rt.c | 10 ++++------
4 files changed, 14 insertions(+), 21 deletions(-)
diff --git a/xen/common/sched/credit.c b/xen/common/sched/credit.c
index 05946eea6e..dee87e7fe2 100644
--- a/xen/common/sched/credit.c
+++ b/xen/common/sched/credit.c
@@ -2048,7 +2048,6 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
const struct csched_pcpu *spc;
const struct csched_unit *svc;
spinlock_t *lock;
- unsigned long flags;
int loop;
/*
@@ -2058,7 +2057,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
* - we scan through the runqueue, so we need the proper runqueue
* lock (the one of the runqueue of this cpu).
*/
- spin_lock_irqsave(&prv->lock, flags);
+ spin_lock(&prv->lock);
lock = pcpu_schedule_lock(cpu);
spc = CSCHED_PCPU(cpu);
@@ -2089,7 +2088,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
}
pcpu_schedule_unlock(lock, cpu);
- spin_unlock_irqrestore(&prv->lock, flags);
+ spin_unlock(&prv->lock);
}
static void
@@ -2098,9 +2097,8 @@ csched_dump(const struct scheduler *ops)
struct list_head *iter_sdom, *iter_svc;
struct csched_private *prv = CSCHED_PRIV(ops);
int loop;
- unsigned long flags;
- spin_lock_irqsave(&prv->lock, flags);
+ spin_lock(&prv->lock);
printk("info:\n"
"\tncpus = %u\n"
@@ -2153,7 +2151,7 @@ csched_dump(const struct scheduler *ops)
}
}
- spin_unlock_irqrestore(&prv->lock, flags);
+ spin_unlock(&prv->lock);
}
static int __init
diff --git a/xen/common/sched/credit2.c b/xen/common/sched/credit2.c
index f2752f27e2..e76d2ed543 100644
--- a/xen/common/sched/credit2.c
+++ b/xen/common/sched/credit2.c
@@ -3649,14 +3649,13 @@ csched2_dump(const struct scheduler *ops)
{
struct list_head *iter_sdom;
struct csched2_private *prv = csched2_priv(ops);
- unsigned long flags;
unsigned int i, j, loop;
/*
* We need the private scheduler lock as we access global
* scheduler data and (below) the list of active domains.
*/
- read_lock_irqsave(&prv->lock, flags);
+ read_lock(&prv->lock);
printk("Active queues: %d\n"
"\tdefault-weight = %d\n",
@@ -3749,7 +3748,7 @@ csched2_dump(const struct scheduler *ops)
spin_unlock(&rqd->lock);
}
- read_unlock_irqrestore(&prv->lock, flags);
+ read_unlock(&prv->lock);
}
static void *
diff --git a/xen/common/sched/null.c b/xen/common/sched/null.c
index 8c3101649d..3b31703d7e 100644
--- a/xen/common/sched/null.c
+++ b/xen/common/sched/null.c
@@ -954,9 +954,8 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
const struct null_pcpu *npc = get_sched_res(cpu)->sched_priv;
const struct null_unit *nvc;
spinlock_t *lock;
- unsigned long flags;
- lock = pcpu_schedule_lock_irqsave(cpu, &flags);
+ lock = pcpu_schedule_lock(cpu);
printk("CPU[%02d] sibling={%*pbl}, core={%*pbl}",
cpu, CPUMASK_PR(per_cpu(cpu_sibling_mask, cpu)),
@@ -974,17 +973,16 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
printk("\n");
}
- pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
+ pcpu_schedule_unlock(lock, cpu);
}
static void null_dump(const struct scheduler *ops)
{
struct null_private *prv = null_priv(ops);
struct list_head *iter;
- unsigned long flags;
unsigned int loop;
- spin_lock_irqsave(&prv->lock, flags);
+ spin_lock(&prv->lock);
printk("\tcpus_free = %*pbl\n", CPUMASK_PR(&prv->cpus_free));
@@ -1029,7 +1027,7 @@ static void null_dump(const struct scheduler *ops)
printk("\n");
spin_unlock(&prv->waitq_lock);
- spin_unlock_irqrestore(&prv->lock, flags);
+ spin_unlock(&prv->lock);
}
static const struct scheduler sched_null_def = {
diff --git a/xen/common/sched/rt.c b/xen/common/sched/rt.c
index 66585ed50a..16379cb2d2 100644
--- a/xen/common/sched/rt.c
+++ b/xen/common/sched/rt.c
@@ -353,9 +353,8 @@ rt_dump_pcpu(const struct scheduler *ops, int cpu)
{
struct rt_private *prv = rt_priv(ops);
const struct rt_unit *svc;
- unsigned long flags;
- spin_lock_irqsave(&prv->lock, flags);
+ spin_lock(&prv->lock);
printk("CPU[%02d]\n", cpu);
/* current UNIT (nothing to say if that's the idle unit). */
svc = rt_unit(curr_on_cpu(cpu));
@@ -363,7 +362,7 @@ rt_dump_pcpu(const struct scheduler *ops, int cpu)
{
rt_dump_unit(ops, svc);
}
- spin_unlock_irqrestore(&prv->lock, flags);
+ spin_unlock(&prv->lock);
}
static void
@@ -373,9 +372,8 @@ rt_dump(const struct scheduler *ops)
struct rt_private *prv = rt_priv(ops);
const struct rt_unit *svc;
const struct rt_dom *sdom;
- unsigned long flags;
- spin_lock_irqsave(&prv->lock, flags);
+ spin_lock(&prv->lock);
if ( list_empty(&prv->sdom) )
goto out;
@@ -421,7 +419,7 @@ rt_dump(const struct scheduler *ops)
}
out:
- spin_unlock_irqrestore(&prv->lock, flags);
+ spin_unlock(&prv->lock);
}
/*
--
2.16.4