All of lore.kernel.org
 help / color / mirror / Atom feed
From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
	George Dunlap <george.dunlap@eu.citrix.com>,
	Meng Xu <mengxu@cis.upenn.edu>,
	Dario Faggioli <dfaggioli@suse.com>
Subject: [Xen-devel] [PATCH 5/8] xen/sched: use keyhandler locks when dumping data to console
Date: Thu, 13 Feb 2020 13:54:46 +0100	[thread overview]
Message-ID: <20200213125449.14226-6-jgross@suse.com> (raw)
In-Reply-To: <20200213125449.14226-1-jgross@suse.com>

Instead of using the normal locks, use the keyhandler-provided trylocks
with timeouts. This requires a special primitive for the scheduler
lock.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/sched/core.c    |  7 +++++++
 xen/common/sched/cpupool.c |  4 +++-
 xen/common/sched/credit.c  | 25 ++++++++++++++++++-------
 xen/common/sched/credit2.c | 17 +++++++++++------
 xen/common/sched/null.c    | 42 +++++++++++++++++++++++++-----------------
 xen/common/sched/private.h |  1 +
 xen/common/sched/rt.c      |  7 +++++--
 7 files changed, 70 insertions(+), 33 deletions(-)

diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index d4e8944e0e..7b8b0fe80e 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -21,6 +21,7 @@
 #include <xen/domain.h>
 #include <xen/delay.h>
 #include <xen/event.h>
+#include <xen/keyhandler.h>
 #include <xen/time.h>
 #include <xen/timer.h>
 #include <xen/perfc.h>
@@ -3302,6 +3303,12 @@ void __init sched_setup_dom0_vcpus(struct domain *d)
 }
 #endif
 
+spinlock_t *keyhandler_pcpu_lock(unsigned int cpu)
+{
+    keyhandler_lock_body(spinlock_t *, pcpu_schedule_trylock(cpu),
+                         "could not get pcpu lock, cpu=%u\n", cpu);
+}
+
 #ifdef CONFIG_COMPAT
 #include "compat.c"
 #endif
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index 476916c6ea..5c181e9772 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -893,7 +893,9 @@ void dump_runq(unsigned char key)
     s_time_t         now = NOW();
     struct cpupool **c;
 
-    spin_lock(&cpupool_lock);
+    if ( !keyhandler_spin_lock(&cpupool_lock, "could not get cpupools") )
+        return;
+
     local_irq_save(flags);
 
     printk("sched_smt_power_savings: %s\n",
diff --git a/xen/common/sched/credit.c b/xen/common/sched/credit.c
index dee87e7fe2..165ff26bb8 100644
--- a/xen/common/sched/credit.c
+++ b/xen/common/sched/credit.c
@@ -2057,8 +2057,15 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
      * - we scan through the runqueue, so we need the proper runqueue
      *   lock (the one of the runqueue of this cpu).
      */
-    spin_lock(&prv->lock);
-    lock = pcpu_schedule_lock(cpu);
+    if ( !keyhandler_spin_lock(&prv->lock, "could not get credit data") )
+        return;
+
+    lock = keyhandler_pcpu_lock(cpu);
+    if ( !lock )
+    {
+        spin_unlock(&prv->lock);
+        return;
+    }
 
     spc = CSCHED_PCPU(cpu);
     runq = &spc->runq;
@@ -2098,7 +2105,8 @@ csched_dump(const struct scheduler *ops)
     struct csched_private *prv = CSCHED_PRIV(ops);
     int loop;
 
-    spin_lock(&prv->lock);
+    if ( !keyhandler_spin_lock(&prv->lock, "could not get credit data") )
+        return;
 
     printk("info:\n"
            "\tncpus              = %u\n"
@@ -2142,12 +2150,15 @@ csched_dump(const struct scheduler *ops)
             spinlock_t *lock;
 
             svc = list_entry(iter_svc, struct csched_unit, active_unit_elem);
-            lock = unit_schedule_lock(svc->unit);
+            lock = keyhandler_pcpu_lock(svc->unit->res->master_cpu);
 
-            printk("\t%3d: ", ++loop);
-            csched_dump_unit(svc);
+            if ( lock )
+            {
+                printk("\t%3d: ", ++loop);
+                csched_dump_unit(svc);
 
-            unit_schedule_unlock(lock, svc->unit);
+                pcpu_schedule_unlock(lock, svc->unit->res->master_cpu);
+            }
         }
     }
 
diff --git a/xen/common/sched/credit2.c b/xen/common/sched/credit2.c
index e76d2ed543..28b03fe744 100644
--- a/xen/common/sched/credit2.c
+++ b/xen/common/sched/credit2.c
@@ -3655,7 +3655,8 @@ csched2_dump(const struct scheduler *ops)
      * We need the private scheduler lock as we access global
      * scheduler data and (below) the list of active domains.
      */
-    read_lock(&prv->lock);
+    if ( !keyhandler_read_lock(&prv->lock, "could not get credit2 data") )
+        return;
 
     printk("Active queues: %d\n"
            "\tdefault-weight     = %d\n",
@@ -3711,12 +3712,15 @@ csched2_dump(const struct scheduler *ops)
             struct csched2_unit * const svc = csched2_unit(unit);
             spinlock_t *lock;
 
-            lock = unit_schedule_lock(unit);
+            lock = keyhandler_pcpu_lock(unit->res->master_cpu);
 
-            printk("\t%3d: ", ++loop);
-            csched2_dump_unit(prv, svc);
+            if ( lock )
+            {
+                printk("\t%3d: ", ++loop);
+                csched2_dump_unit(prv, svc);
 
-            unit_schedule_unlock(lock, unit);
+                pcpu_schedule_unlock(lock, unit->res->master_cpu);
+            }
         }
     }
 
@@ -3727,7 +3731,8 @@ csched2_dump(const struct scheduler *ops)
         int loop = 0;
 
         /* We need the lock to scan the runqueue. */
-        spin_lock(&rqd->lock);
+        if ( !keyhandler_spin_lock(&rqd->lock, "could not get runq") )
+            continue;
 
         printk("Runqueue %d:\n", i);
 
diff --git a/xen/common/sched/null.c b/xen/common/sched/null.c
index 3b31703d7e..fe59ce17fe 100644
--- a/xen/common/sched/null.c
+++ b/xen/common/sched/null.c
@@ -28,6 +28,7 @@
  * if the scheduler is used inside a cpupool.
  */
 
+#include <xen/keyhandler.h>
 #include <xen/sched.h>
 #include <xen/softirq.h>
 #include <xen/trace.h>
@@ -982,7 +983,8 @@ static void null_dump(const struct scheduler *ops)
     struct list_head *iter;
     unsigned int loop;
 
-    spin_lock(&prv->lock);
+    if ( !keyhandler_spin_lock(&prv->lock, "could not get null data") )
+        return;
 
     printk("\tcpus_free = %*pbl\n", CPUMASK_PR(&prv->cpus_free));
 
@@ -1001,31 +1003,37 @@ static void null_dump(const struct scheduler *ops)
             struct null_unit * const nvc = null_unit(unit);
             spinlock_t *lock;
 
-            lock = unit_schedule_lock(unit);
+            lock = keyhandler_pcpu_lock(unit->res->master_cpu);
 
-            printk("\t%3d: ", ++loop);
-            dump_unit(prv, nvc);
-            printk("\n");
+            if ( lock )
+            {
+                printk("\t%3d: ", ++loop);
+                dump_unit(prv, nvc);
+                printk("\n");
 
-            unit_schedule_unlock(lock, unit);
+                pcpu_schedule_unlock(lock, unit->res->master_cpu);
+            }
         }
     }
 
     printk("Waitqueue: ");
     loop = 0;
-    spin_lock(&prv->waitq_lock);
-    list_for_each( iter, &prv->waitq )
+    if ( keyhandler_spin_lock(&prv->waitq_lock, "could not get waitq") )
     {
-        struct null_unit *nvc = list_entry(iter, struct null_unit, waitq_elem);
-
-        if ( loop++ != 0 )
-            printk(", ");
-        if ( loop % 24 == 0 )
-            printk("\n\t");
-        printk("%pdv%d", nvc->unit->domain, nvc->unit->unit_id);
+        list_for_each( iter, &prv->waitq )
+        {
+            struct null_unit *nvc = list_entry(iter, struct null_unit,
+                                               waitq_elem);
+
+            if ( loop++ != 0 )
+                printk(", ");
+            if ( loop % 24 == 0 )
+                printk("\n\t");
+            printk("%pdv%d", nvc->unit->domain, nvc->unit->unit_id);
+        }
+        printk("\n");
+        spin_unlock(&prv->waitq_lock);
     }
-    printk("\n");
-    spin_unlock(&prv->waitq_lock);
 
     spin_unlock(&prv->lock);
 }
diff --git a/xen/common/sched/private.h b/xen/common/sched/private.h
index 2a94179baa..6723f74d28 100644
--- a/xen/common/sched/private.h
+++ b/xen/common/sched/private.h
@@ -631,5 +631,6 @@ struct cpupool *cpupool_get_by_id(int poolid);
 void cpupool_put(struct cpupool *pool);
 int cpupool_add_domain(struct domain *d, int poolid);
 void cpupool_rm_domain(struct domain *d);
+spinlock_t *keyhandler_pcpu_lock(unsigned int cpu);
 
 #endif /* __XEN_SCHED_IF_H__ */
diff --git a/xen/common/sched/rt.c b/xen/common/sched/rt.c
index 16379cb2d2..d4b17e0f8b 100644
--- a/xen/common/sched/rt.c
+++ b/xen/common/sched/rt.c
@@ -354,7 +354,9 @@ rt_dump_pcpu(const struct scheduler *ops, int cpu)
     struct rt_private *prv = rt_priv(ops);
     const struct rt_unit *svc;
 
-    spin_lock(&prv->lock);
+    if ( !keyhandler_spin_lock(&prv->lock, "could not get rt data") )
+        return;
+
     printk("CPU[%02d]\n", cpu);
     /* current UNIT (nothing to say if that's the idle unit). */
     svc = rt_unit(curr_on_cpu(cpu));
@@ -373,7 +375,8 @@ rt_dump(const struct scheduler *ops)
     const struct rt_unit *svc;
     const struct rt_dom *sdom;
 
-    spin_lock(&prv->lock);
+    if ( !keyhandler_spin_lock(&prv->lock, "could not get rt data") )
+        return;
 
     if ( list_empty(&prv->sdom) )
         goto out;
-- 
2.16.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

  parent reply	other threads:[~2020-02-13 12:55 UTC|newest]

Thread overview: 26+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-02-13 12:54 [Xen-devel] [PATCH 0/8] xen: don't let keyhandlers block indefinitely on locks Juergen Gross
2020-02-13 12:54 ` [Xen-devel] [PATCH 1/8] xen: make rangeset_printk() static Juergen Gross
2020-02-13 14:00   ` Jan Beulich
2020-02-13 12:54 ` [Xen-devel] [PATCH 2/8] xen: add using domlist_read_lock in keyhandlers Juergen Gross
2020-02-13 14:01   ` Jan Beulich
2020-02-13 14:09   ` George Dunlap
2020-02-18  5:42   ` Tian, Kevin
2020-02-13 12:54 ` [Xen-devel] [PATCH 3/8] xen/sched: don't use irqsave locks in dumping functions Juergen Gross
2020-02-19 12:40   ` Dario Faggioli
2020-02-19 14:27   ` Jan Beulich
2020-02-19 15:02     ` Jürgen Groß
2020-02-19 15:47       ` Dario Faggioli
2020-02-13 12:54 ` [Xen-devel] [PATCH 4/8] xen: add locks with timeouts for keyhandlers Juergen Gross
2020-03-05 15:25   ` Jan Beulich
2020-03-06  8:08     ` Jürgen Groß
2020-03-06  8:15       ` Jürgen Groß
2020-02-13 12:54 ` Juergen Gross [this message]
2020-02-19 14:31   ` [Xen-devel] [PATCH 5/8] xen/sched: use keyhandler locks when dumping data to console Dario Faggioli
2020-02-19 15:09     ` Jürgen Groß
2020-02-13 12:54 ` [Xen-devel] [PATCH 6/8] xen/common: " Juergen Gross
2020-02-13 12:54 ` [Xen-devel] [PATCH 7/8] xen/drivers: " Juergen Gross
2020-02-13 12:54 ` [Xen-devel] [PATCH 8/8] xen/x86: " Juergen Gross
2020-02-13 18:38 ` [Xen-devel] [PATCH 0/8] xen: don't let keyhandlers block indefinitely on locks Andrew Cooper
2020-02-14  6:05   ` Jürgen Groß
2020-02-14  9:37   ` Jan Beulich
2020-02-19 12:14     ` Julien Grall

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20200213125449.14226-6-jgross@suse.com \
    --to=jgross@suse.com \
    --cc=dfaggioli@suse.com \
    --cc=george.dunlap@eu.citrix.com \
    --cc=mengxu@cis.upenn.edu \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.