From: Dario Faggioli <dfaggioli@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Julien Grall <julien@xen.org>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	George Dunlap <george.dunlap@citrix.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v2 6/7] cpupool: create the 'cpupool sync' infrastructure
Date: Thu, 28 May 2020 23:29:57 +0200
Message-ID: <159070139727.12060.7434914618426479787.stgit@Palanthas>
In-Reply-To: <159070133878.12060.13318432301910522647.stgit@Palanthas>

When we want to make live changes to the configuration of
(typically) the scheduler of a cpupool, we need things to be
quiet in that pool.

Not necessarily as with stop_machine, but we at least need
to make sure that no domain is either running or sitting in
the runqueues of the scheduler itself.

In fact, we need exactly such a mechanism for changing, on
the fly, which CPUs are assigned to which runqueue in a
Credit2 cpupool (see the following changes). But instead of
doing something specific to that use case, let's implement
a generic mechanism.

The reason is, of course, that it may turn out to be useful
for other purposes in the future. But even for this specific
case, it is much easier and cleaner to cede control to the
cpupool code than to try to do everything inside the scheduler.

Within the new cpupool_sync() function, we want to pause all
domains of a pool, potentially including the one from which
the function itself is being called. Therefore, we defer the
pausing, the actual work, and the unpausing to a tasklet.
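
As a sketch of the intended usage (the callback below is made up
for illustration; the real user is the Credit2 change later in
this series), a caller would do something like:

    /* Runs from the tasklet, with every domain in the pool paused. */
    static void pool_reconfigure(void *sched)
    {
        struct scheduler *s = sched;

        /* Nothing in the pool is running: safe to reconfigure 's'. */
    }

    cpupool_sync(pool, pool_reconfigure);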

Suggested-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Dario Faggioli <dfaggioli@suse.com>
---
Cc: Juergen Gross <jgross@suse.com>
Cc: George Dunlap <george.dunlap@citrix.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Julien Grall <julien@xen.org>
Cc: Stefano Stabellini <sstabellini@kernel.org>
---
Changes from v1:
* new patch
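
Note for reviewers: the pausing happens in two phases. A rough
sketch of the sequence (names as in the patch; the timeline is
just illustrative):

    caller (possibly running in one of the pool's domains):
        domain_pause_nosync(d);      /* for each d: count++, no wait */
        tasklet_schedule_on_cpu();   /* caller can now be paused too */

    tasklet (on one of the pool's CPUs):
        domain_pause(d);             /* for each d: wait until off-CPU */
        c->sync_ctl.func(c->sched);  /* the pool is quiet: do the work */
        domain_unpause(d);           /* twice per d, matching both pauses */
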
---
 xen/common/sched/cpupool.c |   51 +++++++++++++++++++++++++++++++++++++++++++
 xen/common/sched/private.h |    6 +++++
 xen/include/xen/sched.h    |    1 +
 3 files changed, 58 insertions(+)

diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index 7ea641ca26..122c371c7a 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -234,6 +234,41 @@ void cpupool_put(struct cpupool *pool)
     free_cpupool_struct(pool);
 }
 
+void do_cpupool_sync(void *arg)
+{
+    struct cpupool *c = arg;
+    struct domain *d;
+
+    spin_lock(&cpupool_lock);
+
+    /*
+     * This is the second round of pausing (this time via domain_pause()):
+     * it makes sure that all the domains have actually stopped running.
+     */
+    rcu_read_lock(&domlist_read_lock);
+    for_each_domain_in_cpupool(d, c)
+        domain_pause(d);
+    rcu_read_unlock(&domlist_read_lock);
+
+    /*
+     * Invoke the function that the caller provided, passing our own
+     * scheduler as the parameter: through it, the function can easily
+     * reach anything it needs.
+     */
+    c->sync_ctl.func(c->sched);
+
+    /* We called pause twice, so we need to do the same with unpause. */
+    rcu_read_lock(&domlist_read_lock);
+    for_each_domain_in_cpupool(d, c)
+    {
+        domain_unpause(d);
+        domain_unpause(d);
+    }
+    rcu_read_unlock(&domlist_read_lock);
+
+    spin_unlock(&cpupool_lock);
+}
+
 /*
  * create a new cpupool with specified poolid and scheduler
  * returns pointer to new cpupool structure if okay, NULL else
@@ -292,6 +327,8 @@ static struct cpupool *cpupool_create(
 
     *q = c;
 
+    tasklet_init(&c->sync_ctl.tasklet, do_cpupool_sync, c);
+
     spin_unlock(&cpupool_lock);
 
     debugtrace_printk("Created cpupool %d with scheduler %s (%s)\n",
@@ -332,6 +369,7 @@ static int cpupool_destroy(struct cpupool *c)
         return -EBUSY;
     }
     *q = c->next;
+    tasklet_kill(&c->sync_ctl.tasklet);
     spin_unlock(&cpupool_lock);
 
     cpupool_put(c);
@@ -372,6 +410,19 @@ int cpupool_move_domain(struct domain *d, struct cpupool *c)
     return ret;
 }
 
+void cpupool_sync(struct cpupool *c, void (*func)(void*))
+{
+    struct domain *d;
+
+    rcu_read_lock(&domlist_read_lock);
+    for_each_domain_in_cpupool(d, c)
+        domain_pause_nosync(d);
+    rcu_read_unlock(&domlist_read_lock);
+
+    c->sync_ctl.func = func;
+    tasklet_schedule_on_cpu(&c->sync_ctl.tasklet, cpumask_first(c->cpu_valid));
+}
+
 /*
  * assign a specific cpu to a cpupool
  * cpupool_lock must be held
diff --git a/xen/common/sched/private.h b/xen/common/sched/private.h
index df50976eb2..4705c8b119 100644
--- a/xen/common/sched/private.h
+++ b/xen/common/sched/private.h
@@ -503,6 +503,11 @@ static inline void sched_unit_unpause(const struct sched_unit *unit)
 #define REGISTER_SCHEDULER(x) static const struct scheduler *x##_entry \
   __used_section(".data.schedulers") = &x;
 
+struct cpupool_sync_ctl {
+    struct tasklet tasklet;
+    void (*func)(void*);
+};
+
 struct cpupool
 {
     int              cpupool_id;
@@ -514,6 +519,7 @@ struct cpupool
     struct scheduler *sched;
     atomic_t         refcnt;
     enum sched_gran  gran;
+    struct cpupool_sync_ctl sync_ctl;
 };
 
 static inline cpumask_t *cpupool_domain_master_cpumask(const struct domain *d)
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index ac53519d7f..e2a233c96c 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -1061,6 +1061,7 @@ extern enum cpufreq_controller {
 } cpufreq_controller;
 
 int cpupool_move_domain(struct domain *d, struct cpupool *c);
+void cpupool_sync(struct cpupool *c, void (*func)(void*));
 int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
 int cpupool_get_id(const struct domain *d);
 const cpumask_t *cpupool_valid_cpus(const struct cpupool *pool);


