From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
George Dunlap <george.dunlap@eu.citrix.com>,
Dario Faggioli <dfaggioli@suse.com>
Subject: [Xen-devel] [PATCH v2 27/48] xen/sched: Change vcpu_migrate_*() to operate on schedule unit
Date: Fri, 9 Aug 2019 16:58:12 +0200 [thread overview]
Message-ID: <20190809145833.1020-28-jgross@suse.com> (raw)
In-Reply-To: <20190809145833.1020-1-jgross@suse.com>
Now that vcpu_migrate_start() and vcpu_migrate_finish() are used only
to ensure a vcpu is running on a suitable processor, they can be
switched to operate on schedule units instead of vcpus.
While doing that, rename them accordingly and make the _start() variant
static. As it is needed anyway, call sync_vcpu_execstate() for each
vcpu of the unit when changing processors.
vcpu_move_locked() is switched to schedule unit, too.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
xen/common/schedule.c | 106 ++++++++++++++++++++++++++++++--------------------
1 file changed, 63 insertions(+), 43 deletions(-)
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 4c488ddde0..e4d0dd4b65 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -733,35 +733,40 @@ void vcpu_unblock(struct vcpu *v)
}
/*
- * Do the actual movement of a vcpu from old to new CPU. Locks for *both*
+ * Do the actual movement of an unit from old to new CPU. Locks for *both*
* CPUs needs to have been taken already when calling this!
*/
-static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
+static void sched_unit_move_locked(struct sched_unit *unit,
+ unsigned int new_cpu)
{
- unsigned int old_cpu = v->processor;
+ unsigned int old_cpu = unit->res->processor;
+ struct vcpu *v;
/*
* Transfer urgency status to new CPU before switching CPUs, as
* once the switch occurs, v->is_urgent is no longer protected by
* the per-CPU scheduler lock we are holding.
*/
- if ( unlikely(v->is_urgent) && (old_cpu != new_cpu) )
+ for_each_sched_unit_vcpu ( unit, v )
{
- atomic_inc(&get_sched_res(new_cpu)->urgent_count);
- atomic_dec(&get_sched_res(old_cpu)->urgent_count);
+ if ( unlikely(v->is_urgent) && (old_cpu != new_cpu) )
+ {
+ atomic_inc(&get_sched_res(new_cpu)->urgent_count);
+ atomic_dec(&get_sched_res(old_cpu)->urgent_count);
+ }
}
/*
* Actual CPU switch to new CPU. This is safe because the lock
* pointer can't change while the current lock is held.
*/
- sched_migrate(vcpu_scheduler(v), v->sched_unit, new_cpu);
+ sched_migrate(unit_scheduler(unit), unit, new_cpu);
}
/*
* Initiating migration
*
- * In order to migrate, we need the vcpu in question to have stopped
+ * In order to migrate, we need the unit in question to have stopped
* running and had sched_sleep() called (to take it off any
* runqueues, for instance); and if it is currently running, it needs
* to be scheduled out. Finally, we need to hold the scheduling locks
@@ -777,37 +782,45 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
* should be called like this:
*
* lock = unit_schedule_lock_irq(unit);
- * vcpu_migrate_start(v);
+ * sched_unit_migrate_start(unit);
* unit_schedule_unlock_irq(lock, unit)
- * vcpu_migrate_finish(v);
+ * sched_unit_migrate_finish(unit);
*
- * vcpu_migrate_finish() will do the work now if it can, or simply
- * return if it can't (because v is still running); in that case
- * vcpu_migrate_finish() will be called by context_saved().
+ * sched_unit_migrate_finish() will do the work now if it can, or simply
+ * return if it can't (because unit is still running); in that case
+ * sched_unit_migrate_finish() will be called by context_saved().
*/
-static void vcpu_migrate_start(struct vcpu *v)
+static void sched_unit_migrate_start(struct sched_unit *unit)
{
- set_bit(_VPF_migrating, &v->pause_flags);
- vcpu_sleep_nosync_locked(v);
+ struct vcpu *v;
+
+ for_each_sched_unit_vcpu ( unit, v )
+ {
+ set_bit(_VPF_migrating, &v->pause_flags);
+ vcpu_sleep_nosync_locked(v);
+ }
}
-static void vcpu_migrate_finish(struct vcpu *v)
+static void sched_unit_migrate_finish(struct sched_unit *unit)
{
unsigned long flags;
unsigned int old_cpu, new_cpu;
spinlock_t *old_lock, *new_lock;
bool_t pick_called = 0;
+ struct vcpu *v;
/*
- * If the vcpu is currently running, this will be handled by
+ * If the unit is currently running, this will be handled by
* context_saved(); and in any case, if the bit is cleared, then
* someone else has already done the work so we don't need to.
*/
- if ( v->sched_unit->is_running ||
- !test_bit(_VPF_migrating, &v->pause_flags) )
- return;
+ for_each_sched_unit_vcpu ( unit, v )
+ {
+ if ( unit->is_running || !test_bit(_VPF_migrating, &v->pause_flags) )
+ return;
+ }
- old_cpu = new_cpu = v->processor;
+ old_cpu = new_cpu = unit->res->processor;
for ( ; ; )
{
/*
@@ -820,7 +833,7 @@ static void vcpu_migrate_finish(struct vcpu *v)
sched_spin_lock_double(old_lock, new_lock, &flags);
- old_cpu = v->processor;
+ old_cpu = unit->res->processor;
if ( old_lock == get_sched_res(old_cpu)->schedule_lock )
{
/*
@@ -829,15 +842,15 @@ static void vcpu_migrate_finish(struct vcpu *v)
*/
if ( pick_called &&
(new_lock == get_sched_res(new_cpu)->schedule_lock) &&
- cpumask_test_cpu(new_cpu, v->sched_unit->cpu_hard_affinity) &&
- cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
+ cpumask_test_cpu(new_cpu, unit->cpu_hard_affinity) &&
+ cpumask_test_cpu(new_cpu, unit->domain->cpupool->cpu_valid) )
break;
/* Select a new CPU. */
- new_cpu = sched_pick_resource(vcpu_scheduler(v),
- v->sched_unit)->processor;
+ new_cpu = sched_pick_resource(unit_scheduler(unit),
+ unit)->processor;
if ( (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
- cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
+ cpumask_test_cpu(new_cpu, unit->domain->cpupool->cpu_valid) )
break;
pick_called = 1;
}
@@ -858,22 +871,30 @@ static void vcpu_migrate_finish(struct vcpu *v)
* because they both happen in (different) spinlock regions, and those
* regions are strictly serialised.
*/
- if ( v->sched_unit->is_running ||
- !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
+ for_each_sched_unit_vcpu ( unit, v )
{
- sched_spin_unlock_double(old_lock, new_lock, flags);
- return;
+ if ( unit->is_running ||
+ !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
+ {
+ sched_spin_unlock_double(old_lock, new_lock, flags);
+ return;
+ }
}
- vcpu_move_locked(v, new_cpu);
+ sched_unit_move_locked(unit, new_cpu);
sched_spin_unlock_double(old_lock, new_lock, flags);
if ( old_cpu != new_cpu )
- sched_move_irqs(v->sched_unit);
+ {
+ for_each_sched_unit_vcpu ( unit, v )
+ sync_vcpu_execstate(v);
+ sched_move_irqs(unit);
+ }
/* Wake on new CPU. */
- vcpu_wake(v);
+ for_each_sched_unit_vcpu ( unit, v )
+ vcpu_wake(v);
}
/*
@@ -1041,10 +1062,9 @@ int cpu_disable_scheduler(unsigned int cpu)
* * the scheduler will always find a suitable solution, or
* things would have failed before getting in here.
*/
- vcpu_migrate_start(unit->vcpu_list);
+ sched_unit_migrate_start(unit);
unit_schedule_unlock_irqrestore(lock, flags, unit);
-
- vcpu_migrate_finish(unit->vcpu_list);
+ sched_unit_migrate_finish(unit);
/*
* The only caveat, in this case, is that if a vcpu active in
@@ -1128,14 +1148,14 @@ static int vcpu_set_affinity(
ASSERT(which == unit->cpu_soft_affinity);
sched_set_affinity(v, NULL, affinity);
}
- vcpu_migrate_start(v);
+ sched_unit_migrate_start(unit);
}
unit_schedule_unlock_irq(lock, unit);
domain_update_node_affinity(v->domain);
- vcpu_migrate_finish(v);
+ sched_unit_migrate_finish(unit);
return ret;
}
@@ -1396,12 +1416,12 @@ int vcpu_temporary_affinity(struct vcpu *v, unsigned int cpu, uint8_t reason)
migrate = !ret && !cpumask_test_cpu(v->processor, unit->cpu_hard_affinity);
if ( migrate )
- vcpu_migrate_start(v);
+ sched_unit_migrate_start(unit);
unit_schedule_unlock_irq(lock, unit);
if ( migrate )
- vcpu_migrate_finish(v);
+ sched_unit_migrate_finish(unit);
return ret;
}
@@ -1794,7 +1814,7 @@ void context_saved(struct vcpu *prev)
sched_context_saved(vcpu_scheduler(prev), prev->sched_unit);
- vcpu_migrate_finish(prev);
+ sched_unit_migrate_finish(prev->sched_unit);
}
/* The scheduler timer: force a run through the scheduler */
--
2.16.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
next prev parent reply other threads:[~2019-08-09 14:59 UTC|newest]
Thread overview: 126+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-08-09 14:57 [Xen-devel] [PATCH v2 00/48] xen: add core scheduling support Juergen Gross
2019-08-09 14:57 ` [Xen-devel] [PATCH v2 01/48] xen/sched: use new sched_unit instead of vcpu in scheduler interfaces Juergen Gross
2019-09-02 9:07 ` Jan Beulich
2019-09-09 5:26 ` Juergen Gross
2019-08-09 14:57 ` [Xen-devel] [PATCH v2 02/48] xen/sched: move per-vcpu scheduler private data pointer to sched_unit Juergen Gross
2019-08-23 10:47 ` Dario Faggioli
2019-08-09 14:57 ` [Xen-devel] [PATCH v2 03/48] xen/sched: build a linked list of struct sched_unit Juergen Gross
2019-08-23 10:52 ` Dario Faggioli
2019-08-09 14:57 ` [Xen-devel] [PATCH v2 04/48] xen/sched: introduce struct sched_resource Juergen Gross
2019-08-23 10:54 ` Dario Faggioli
2019-09-04 13:10 ` Jan Beulich
2019-09-09 5:31 ` Juergen Gross
2019-08-09 14:57 ` [Xen-devel] [PATCH v2 05/48] xen/sched: let pick_cpu return a scheduler resource Juergen Gross
2019-09-04 13:34 ` Jan Beulich
2019-09-09 5:43 ` Juergen Gross
2019-08-09 14:57 ` [Xen-devel] [PATCH v2 06/48] xen/sched: switch schedule_data.curr to point at sched_unit Juergen Gross
2019-09-04 13:36 ` Jan Beulich
2019-09-09 5:46 ` Juergen Gross
2019-08-09 14:57 ` [Xen-devel] [PATCH v2 07/48] xen/sched: move per cpu scheduler private data into struct sched_resource Juergen Gross
2019-09-04 13:48 ` Jan Beulich
2019-09-05 7:13 ` Juergen Gross
2019-09-05 7:38 ` Jan Beulich
2019-09-09 13:03 ` Dario Faggioli
2019-08-09 14:57 ` [Xen-devel] [PATCH v2 08/48] xen/sched: switch vcpu_schedule_lock to unit_schedule_lock Juergen Gross
2019-09-04 14:02 ` Jan Beulich
2019-09-04 14:41 ` Juergen Gross
2019-09-04 14:54 ` Jan Beulich
2019-09-04 15:02 ` Juergen Gross
2019-09-11 16:02 ` Dario Faggioli
2019-08-09 14:57 ` [Xen-devel] [PATCH v2 09/48] xen/sched: move some per-vcpu items to struct sched_unit Juergen Gross
2019-09-04 14:16 ` Jan Beulich
2019-09-09 6:39 ` Juergen Gross
2019-09-09 6:55 ` Jan Beulich
2019-09-09 7:05 ` Juergen Gross
2019-08-09 14:57 ` [Xen-devel] [PATCH v2 10/48] xen/sched: add scheduler helpers hiding vcpu Juergen Gross
2019-09-04 14:49 ` Jan Beulich
2019-09-11 13:22 ` Juergen Gross
2019-08-09 14:57 ` [Xen-devel] [PATCH v2 11/48] xen/sched: rename scheduler related perf counters Juergen Gross
2019-08-09 14:57 ` [Xen-devel] [PATCH v2 12/48] xen/sched: switch struct task_slice from vcpu to sched_unit Juergen Gross
2019-08-09 14:57 ` [Xen-devel] [PATCH v2 13/48] xen/sched: add is_running indicator to struct sched_unit Juergen Gross
2019-09-04 15:06 ` Jan Beulich
2019-09-11 13:44 ` Juergen Gross
2019-09-11 15:06 ` Jan Beulich
2019-09-11 15:32 ` Juergen Gross
2019-08-09 14:57 ` [Xen-devel] [PATCH v2 14/48] xen/sched: make null scheduler vcpu agnostic Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 15/48] xen/sched: make rt " Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 16/48] xen/sched: make credit " Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 17/48] xen/sched: make credit2 " Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 18/48] xen/sched: make arinc653 " Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 19/48] xen: add sched_unit_pause_nosync() and sched_unit_unpause() Juergen Gross
2019-09-09 13:34 ` Jan Beulich
2019-09-11 14:15 ` Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 20/48] xen: let vcpu_create() select processor Juergen Gross
2019-08-23 16:42 ` Julien Grall
2019-09-09 13:38 ` Jan Beulich
2019-09-11 14:22 ` Juergen Gross
2019-09-11 17:20 ` Dario Faggioli
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 21/48] xen/sched: use sched_resource cpu instead smp_processor_id in schedulers Juergen Gross
2019-09-09 14:17 ` Jan Beulich
2019-09-12 9:34 ` Juergen Gross
2019-09-12 10:04 ` Jan Beulich
2019-09-12 11:03 ` Juergen Gross
2019-09-12 11:17 ` Juergen Gross
2019-09-12 11:46 ` Jan Beulich
2019-09-12 11:53 ` Juergen Gross
2019-09-12 12:08 ` Jan Beulich
2019-09-12 12:13 ` Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 22/48] xen/sched: switch schedule() from vcpus to sched_units Juergen Gross
2019-09-09 14:35 ` Jan Beulich
2019-09-12 13:44 ` Juergen Gross
2019-09-12 14:34 ` Jan Beulich
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 23/48] xen/sched: switch sched_move_irqs() to take sched_unit as parameter Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 24/48] xen: switch from for_each_vcpu() to for_each_sched_unit() Juergen Gross
2019-09-09 15:14 ` Jan Beulich
2019-09-12 14:02 ` Juergen Gross
2019-09-12 14:40 ` Jan Beulich
2019-09-12 14:47 ` Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 25/48] xen/sched: add runstate counters to struct sched_unit Juergen Gross
2019-09-09 14:30 ` Jan Beulich
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 26/48] xen/sched: rework and rename vcpu_force_reschedule() Juergen Gross
2019-09-10 14:06 ` Jan Beulich
2019-09-13 9:33 ` Juergen Gross
2019-09-13 9:40 ` Jan Beulich
2019-08-09 14:58 ` Juergen Gross [this message]
2019-09-10 15:11 ` [Xen-devel] [PATCH v2 27/48] xen/sched: Change vcpu_migrate_*() to operate on schedule unit Jan Beulich
2019-09-13 12:33 ` Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 28/48] xen/sched: move struct task_slice into struct sched_unit Juergen Gross
2019-09-10 15:18 ` Jan Beulich
2019-09-13 12:56 ` Juergen Gross
2019-09-12 8:13 ` Dario Faggioli
2019-09-12 8:21 ` Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 29/48] xen/sched: add code to sync scheduling of all vcpus of a sched unit Juergen Gross
2019-09-10 15:36 ` Jan Beulich
2019-09-13 13:12 ` Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 30/48] xen/sched: introduce unit_runnable_state() Juergen Gross
2019-09-11 10:30 ` Jan Beulich
2019-09-12 10:22 ` Dario Faggioli
2019-09-13 14:07 ` Juergen Gross
2019-09-13 14:44 ` Jan Beulich
2019-09-13 15:23 ` Juergen Gross
2019-09-12 10:24 ` Dario Faggioli
2019-09-13 14:14 ` Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 31/48] xen/sched: add support for multiple vcpus per sched unit where missing Juergen Gross
2019-09-11 10:43 ` Jan Beulich
2019-09-13 15:01 ` Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 32/48] xen/sched: modify cpupool_domain_cpumask() to be an unit mask Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 33/48] xen/sched: support allocating multiple vcpus into one sched unit Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 34/48] xen/sched: add a percpu resource index Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 35/48] xen/sched: add fall back to idle vcpu when scheduling unit Juergen Gross
2019-09-11 11:33 ` Julien Grall
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 36/48] xen/sched: make vcpu_wake() and vcpu_sleep() core scheduling aware Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 37/48] xen/sched: carve out freeing sched_unit memory into dedicated function Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 38/48] xen/sched: move per-cpu variable scheduler to struct sched_resource Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 39/48] xen/sched: move per-cpu variable cpupool " Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 40/48] xen/sched: reject switching smt on/off with core scheduling active Juergen Gross
2019-09-10 15:47 ` Jan Beulich
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 41/48] xen/sched: prepare per-cpupool scheduling granularity Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 42/48] xen/sched: split schedule_cpu_switch() Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 43/48] xen/sched: protect scheduling resource via rcu Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 44/48] xen/sched: support multiple cpus per scheduling resource Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 45/48] xen/sched: support differing granularity in schedule_cpu_[add/rm]() Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 46/48] xen/sched: support core scheduling for moving cpus to/from cpupools Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 47/48] xen/sched: disable scheduling when entering ACPI deep sleep states Juergen Gross
2019-08-09 14:58 ` [Xen-devel] [PATCH v2 48/48] xen/sched: add scheduling granularity enum Juergen Gross
2019-08-15 10:17 ` [Xen-devel] [PATCH v2 00/48] xen: add core scheduling support Sergey Dyasli
2019-09-05 6:22 ` Juergen Gross
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190809145833.1020-28-jgross@suse.com \
--to=jgross@suse.com \
--cc=dfaggioli@suse.com \
--cc=george.dunlap@eu.citrix.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).