From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>, Tim Deegan <tim@xen.org>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Wei Liu <wei.liu2@citrix.com>,
	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
	George Dunlap <George.Dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	Dario Faggioli <dfaggioli@suse.com>,
	Julien Grall <julien.grall@arm.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: [PATCH 5/6] xen/cpupool: simplify suspend/resume handling
Date: Mon, 18 Mar 2019 14:11:54 +0100
Message-ID: <20190318131155.29450-6-jgross@suse.com>
In-Reply-To: <20190318131155.29450-1-jgross@suse.com>

Instead of temporarily removing cpus from their cpupools during
suspend/resume, permanently remove only those cpus which didn't come
up again when resuming.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/cpupool.c       | 130 ++++++++++++++++++---------------------------
 xen/include/xen/sched-if.h |   1 -
 2 files changed, 51 insertions(+), 80 deletions(-)
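
Note for reviewers (not part of the patch itself): the sketch below
illustrates the resume-time flow this change relies on. Cpus that were
online before suspend but fail to come back are expected to receive the
new CPU_RESUME_FAILED notification (added earlier in this series), which
the cpupool notifier handles via cpupool_cpu_remove_forced(). The mask
and helper names used here (cpus_frozen_for_suspend,
cpu_notifier_call_chain()) are illustrative assumptions, not the exact
Xen API.

    /*
     * Illustrative sketch only -- not part of this patch.  Assumed
     * caller-side flow on resume: for every cpu that was taken down for
     * suspend but did not come back online, raise CPU_RESUME_FAILED so
     * that subscribers (such as the cpupool notifier in this patch) can
     * drop the cpu for good.  Mask and helper names are assumptions.
     */
    static void resume_notify_failed_cpus(void)
    {
        unsigned int cpu;

        for_each_cpu ( cpu, &cpus_frozen_for_suspend ) /* hypothetical mask */
        {
            if ( cpu_online(cpu) )
                continue;              /* cpu came back, nothing to do */

            /* Did not come up again: let subscribers remove the cpu. */
            cpu_notifier_call_chain(cpu, CPU_RESUME_FAILED); /* assumed helper */
        }
    }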

diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index e89bb67e71..ed689fd290 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -47,12 +47,6 @@ static struct cpupool *alloc_cpupool_struct(void)
         xfree(c);
         c = NULL;
     }
-    else if ( !zalloc_cpumask_var(&c->cpu_suspended) )
-    {
-        free_cpumask_var(c->cpu_valid);
-        xfree(c);
-        c = NULL;
-    }
 
     return c;
 }
@@ -60,10 +54,7 @@ static struct cpupool *alloc_cpupool_struct(void)
 static void free_cpupool_struct(struct cpupool *c)
 {
     if ( c )
-    {
-        free_cpumask_var(c->cpu_suspended);
         free_cpumask_var(c->cpu_valid);
-    }
     xfree(c);
 }
 
@@ -477,10 +468,6 @@ void cpupool_rm_domain(struct domain *d)
 /*
  * Called to add a cpu to a pool. CPUs being hot-plugged are added to pool0,
  * as they must have been in there when unplugged.
- *
- * If, on the other hand, we are adding CPUs because we are resuming (e.g.,
- * after ACPI S3) we put the cpu back in the pool where it was in prior when
- * we suspended.
  */
 static int cpupool_cpu_add(unsigned int cpu)
 {
@@ -490,42 +477,15 @@ static int cpupool_cpu_add(unsigned int cpu)
     cpumask_clear_cpu(cpu, &cpupool_locked_cpus);
     cpumask_set_cpu(cpu, &cpupool_free_cpus);
 
-    if ( system_state == SYS_STATE_suspend || system_state == SYS_STATE_resume )
-    {
-        struct cpupool **c;
-
-        for_each_cpupool(c)
-        {
-            if ( cpumask_test_cpu(cpu, (*c)->cpu_suspended ) )
-            {
-                ret = cpupool_assign_cpu_locked(*c, cpu);
-                if ( ret )
-                    goto out;
-                cpumask_clear_cpu(cpu, (*c)->cpu_suspended);
-                break;
-            }
-        }
+    /*
+     * If we are not resuming, we are hot-plugging the cpu, in which case
+     * we add it to pool0, as it certainly was there when hot-unplugged
+     * (or unplugging would have failed) and that is the default behavior
+     * anyway.
+     */
+    per_cpu(cpupool, cpu) = NULL;
+    ret = cpupool_assign_cpu_locked(cpupool0, cpu);
 
-        /*
-         * Either cpu has been found as suspended in a pool, and added back
-         * there, or it stayed free (if it did not belong to any pool when
-         * suspending), and we don't want to do anything.
-         */
-        ASSERT(cpumask_test_cpu(cpu, &cpupool_free_cpus) ||
-               cpumask_test_cpu(cpu, (*c)->cpu_valid));
-    }
-    else
-    {
-        /*
-         * If we are not resuming, we are hot-plugging cpu, and in which case
-         * we add it to pool0, as it certainly was there when hot-unplagged
-         * (or unplugging would have failed) and that is the default behavior
-         * anyway.
-         */
-        per_cpu(cpupool, cpu) = NULL;
-        ret = cpupool_assign_cpu_locked(cpupool0, cpu);
-    }
- out:
     spin_unlock(&cpupool_lock);
 
     return ret;
@@ -535,42 +495,14 @@ static int cpupool_cpu_add(unsigned int cpu)
  * Called to remove a CPU from a pool. The CPU is locked, to forbid removing
  * it from pool0. In fact, if we want to hot-unplug a CPU, it must belong to
  * pool0, or we fail.
- *
- * However, if we are suspending (e.g., to ACPI S3), we mark the CPU in such
- * a way that it can be put back in its pool when resuming.
  */
 static int cpupool_cpu_remove(unsigned int cpu)
 {
     int ret = -ENODEV;
 
     spin_lock(&cpupool_lock);
-    if ( system_state == SYS_STATE_suspend )
-    {
-        struct cpupool **c;
-
-        for_each_cpupool(c)
-        {
-            if ( cpumask_test_cpu(cpu, (*c)->cpu_valid ) )
-            {
-                cpumask_set_cpu(cpu, (*c)->cpu_suspended);
-                cpumask_clear_cpu(cpu, (*c)->cpu_valid);
-                break;
-            }
-        }
 
-        /*
-         * Either we found cpu in a pool, or it must be free (if it has been
-         * hot-unplagged, then we must have found it in pool0). It is, of
-         * course, fine to suspend or shutdown with CPUs not assigned to a
-         * pool, and (in case of suspend) they will stay free when resuming.
-         */
-        ASSERT(cpumask_test_cpu(cpu, &cpupool_free_cpus) ||
-               cpumask_test_cpu(cpu, (*c)->cpu_suspended));
-        ASSERT(cpumask_test_cpu(cpu, &cpu_online_map) ||
-               cpumask_test_cpu(cpu, cpupool0->cpu_suspended));
-        ret = 0;
-    }
-    else if ( cpumask_test_cpu(cpu, cpupool0->cpu_valid) )
+    if ( cpumask_test_cpu(cpu, cpupool0->cpu_valid) )
     {
         /*
          * If we are not suspending, we are hot-unplugging cpu, and that is
@@ -587,6 +519,41 @@ static int cpupool_cpu_remove(unsigned int cpu)
     return ret;
 }
 
+/*
+ * Called during resume for all cpus which didn't come up again. The cpu must
+ * be removed from the cpupool it is assigned to. In case a cpupool would be
+ * left without any cpu, we move all domains of that cpupool to cpupool0.
+ */
+static void cpupool_cpu_remove_forced(unsigned int cpu)
+{
+    struct cpupool **c;
+    struct domain *d;
+
+    spin_lock(&cpupool_lock);
+
+    if ( cpumask_test_cpu(cpu, &cpupool_free_cpus) )
+        cpumask_clear_cpu(cpu, &cpupool_free_cpus);
+    else
+    {
+        for_each_cpupool(c)
+        {
+            if ( cpumask_test_cpu(cpu, (*c)->cpu_valid) )
+            {
+                cpumask_clear_cpu(cpu, (*c)->cpu_valid);
+                if ( cpumask_weight((*c)->cpu_valid) == 0 )
+                {
+                    if ( *c == cpupool0 )
+                        panic("No cpu left in cpupool0\n");
+                    for_each_domain_in_cpupool(d, *c)
+                        cpupool_move_domain_locked(d, cpupool0);
+                }
+            }
+        }
+    }
+
+    spin_unlock(&cpupool_lock);
+}
+
 /*
  * do cpupool related sysctl operations
  */
@@ -774,10 +741,15 @@ static int cpu_callback(
     {
     case CPU_DOWN_FAILED:
     case CPU_ONLINE:
-        rc = cpupool_cpu_add(cpu);
+        if ( system_state <= SYS_STATE_active )
+            rc = cpupool_cpu_add(cpu);
         break;
     case CPU_DOWN_PREPARE:
-        rc = cpupool_cpu_remove(cpu);
+        if ( system_state <= SYS_STATE_active )
+            rc = cpupool_cpu_remove(cpu);
+        break;
+    case CPU_RESUME_FAILED:
+        cpupool_cpu_remove_forced(cpu);
         break;
     default:
         break;
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 9596eae1e2..92bc7a0365 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -214,7 +214,6 @@ struct cpupool
 {
     int              cpupool_id;
     cpumask_var_t    cpu_valid;      /* all cpus assigned to pool */
-    cpumask_var_t    cpu_suspended;  /* cpus in S3 that should be in this pool */
     struct cpupool   *next;
     unsigned int     n_dom;
     struct scheduler *sched;
-- 
2.16.4

