From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>, Tim Deegan <tim@xen.org>,
	Stefano Stabellini <sstabellini@kernel.org>, Wei Liu <wl@xen.org>,
	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
	George Dunlap <george.dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	Robert VanVossen <robert.vanvossen@dornerworks.com>,
	Dario Faggioli <dfaggioli@suse.com>,
	Julien Grall <julien.grall@arm.com>,
	Josh Whitehead <josh.whitehead@dornerworks.com>,
	Meng Xu <mengxu@cis.upenn.edu>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH 10/60] xen/sched: switch schedule_data.curr to point at sched_unit
Date: Tue, 28 May 2019 12:32:23 +0200
Message-ID: <20190528103313.1343-11-jgross@suse.com>
In-Reply-To: <20190528103313.1343-1-jgross@suse.com>

In preparation for core scheduling, let the per-cpu pointer
schedule_data.curr point to a struct sched_unit instead of the related
vcpu. At the same time, rename the per-vcpu scheduler-specific structs
to per-unit ones.
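
As an illustrative sketch only (not part of the patch, and using
simplified stand-in struct definitions rather than the real Xen ones):
comparisons that used to be made against a struct vcpu are now made
against its sched_unit, and the vcpu is reached through the unit's
back-pointer; the curr_on_cpu() accessor used throughout the diff wraps
this per-cpu field.

#include <stdio.h>

/* Simplified stand-ins for the real Xen structures. */
struct sched_unit;

struct vcpu {
    int vcpu_id;
    struct sched_unit *sched_unit;   /* back-pointer to the unit */
};

struct sched_unit {
    struct vcpu *vcpu;               /* the (single) vcpu of this unit */
};

struct schedule_data {
    struct sched_unit *curr;         /* was: struct vcpu *curr */
};

int main(void)
{
    struct vcpu v = { .vcpu_id = 0 };
    struct sched_unit u = { .vcpu = &v };
    struct schedule_data sd;

    v.sched_unit = &u;
    sd.curr = &u;                    /* previously: sd.curr = &v; */

    /* Comparisons against a vcpu now go through its unit ... */
    if ( sd.curr == v.sched_unit )
        /* ... and the vcpu is reached via the unit's back-pointer. */
        printf("vcpu %d is current\n", sd.curr->vcpu->vcpu_id);

    return 0;
}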

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/sched_arinc653.c |   2 +-
 xen/common/sched_credit.c   | 101 +++++++++++++-------------
 xen/common/sched_credit2.c  | 168 ++++++++++++++++++++++----------------------
 xen/common/sched_null.c     |  44 ++++++------
 xen/common/sched_rt.c       | 118 +++++++++++++++----------------
 xen/common/schedule.c       |   8 ++-
 xen/include/xen/sched-if.h  |   2 +-
 7 files changed, 220 insertions(+), 223 deletions(-)

diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index 383df750b0..7f26e6a0b0 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -475,7 +475,7 @@ a653sched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
      * If the VCPU being put to sleep is the same one that is currently
      * running, raise a softirq to invoke the scheduler to switch domains.
      */
-    if ( per_cpu(schedule_data, vc->processor).curr == vc )
+    if ( per_cpu(schedule_data, vc->processor).curr == unit )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
 }
 
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 7f7596b3bf..bc87b813dc 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -83,7 +83,7 @@
     ((struct csched_private *)((_ops)->sched_data))
 #define CSCHED_PCPU(_c)     \
     ((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
-#define CSCHED_VCPU(_vcpu)  ((struct csched_vcpu *) (_vcpu)->sched_unit->priv)
+#define CSCHED_UNIT(unit)   ((struct csched_unit *) (unit)->priv)
 #define CSCHED_DOM(_dom)    ((struct csched_dom *) (_dom)->sched_priv)
 #define RUNQ(_cpu)          (&(CSCHED_PCPU(_cpu)->runq))
 
@@ -160,7 +160,7 @@ struct csched_pcpu {
 /*
  * Virtual CPU
  */
-struct csched_vcpu {
+struct csched_unit {
     struct list_head runq_elem;
     struct list_head active_vcpu_elem;
 
@@ -231,15 +231,15 @@ static void csched_tick(void *_cpu);
 static void csched_acct(void *dummy);
 
 static inline int
-__vcpu_on_runq(struct csched_vcpu *svc)
+__vcpu_on_runq(struct csched_unit *svc)
 {
     return !list_empty(&svc->runq_elem);
 }
 
-static inline struct csched_vcpu *
+static inline struct csched_unit *
 __runq_elem(struct list_head *elem)
 {
-    return list_entry(elem, struct csched_vcpu, runq_elem);
+    return list_entry(elem, struct csched_unit, runq_elem);
 }
 
 /* Is the first element of cpu's runq (if any) cpu's idle vcpu? */
@@ -271,7 +271,7 @@ dec_nr_runnable(unsigned int cpu)
 }
 
 static inline void
-__runq_insert(struct csched_vcpu *svc)
+__runq_insert(struct csched_unit *svc)
 {
     unsigned int cpu = svc->vcpu->processor;
     const struct list_head * const runq = RUNQ(cpu);
@@ -281,7 +281,7 @@ __runq_insert(struct csched_vcpu *svc)
 
     list_for_each( iter, runq )
     {
-        const struct csched_vcpu * const iter_svc = __runq_elem(iter);
+        const struct csched_unit * const iter_svc = __runq_elem(iter);
         if ( svc->pri > iter_svc->pri )
             break;
     }
@@ -302,34 +302,34 @@ __runq_insert(struct csched_vcpu *svc)
 }
 
 static inline void
-runq_insert(struct csched_vcpu *svc)
+runq_insert(struct csched_unit *svc)
 {
     __runq_insert(svc);
     inc_nr_runnable(svc->vcpu->processor);
 }
 
 static inline void
-__runq_remove(struct csched_vcpu *svc)
+__runq_remove(struct csched_unit *svc)
 {
     BUG_ON( !__vcpu_on_runq(svc) );
     list_del_init(&svc->runq_elem);
 }
 
 static inline void
-runq_remove(struct csched_vcpu *svc)
+runq_remove(struct csched_unit *svc)
 {
     dec_nr_runnable(svc->vcpu->processor);
     __runq_remove(svc);
 }
 
-static void burn_credits(struct csched_vcpu *svc, s_time_t now)
+static void burn_credits(struct csched_unit *svc, s_time_t now)
 {
     s_time_t delta;
     uint64_t val;
     unsigned int credits;
 
     /* Assert svc is current */
-    ASSERT( svc == CSCHED_VCPU(curr_on_cpu(svc->vcpu->processor)) );
+    ASSERT( svc == CSCHED_UNIT(curr_on_cpu(svc->vcpu->processor)) );
 
     if ( (delta = now - svc->start_time) <= 0 )
         return;
@@ -347,10 +347,10 @@ boolean_param("tickle_one_idle_cpu", opt_tickle_one_idle);
 
 DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
 
-static inline void __runq_tickle(struct csched_vcpu *new)
+static inline void __runq_tickle(struct csched_unit *new)
 {
     unsigned int cpu = new->vcpu->processor;
-    struct csched_vcpu * const cur = CSCHED_VCPU(curr_on_cpu(cpu));
+    struct csched_unit * const cur = CSCHED_UNIT(curr_on_cpu(cpu));
     struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
     cpumask_t mask, idle_mask, *online;
     int balance_step, idlers_empty;
@@ -605,7 +605,7 @@ init_pdata(struct csched_private *prv, struct csched_pcpu *spc, int cpu)
     spc->idle_bias = nr_cpu_ids - 1;
 
     /* Start off idling... */
-    BUG_ON(!is_idle_vcpu(curr_on_cpu(cpu)));
+    BUG_ON(!is_idle_vcpu(curr_on_cpu(cpu)->vcpu));
     cpumask_set_cpu(cpu, prv->idlers);
     spc->nr_runnable = 0;
 }
@@ -637,7 +637,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 {
     struct schedule_data *sd = &per_cpu(schedule_data, cpu);
     struct csched_private *prv = CSCHED_PRIV(new_ops);
-    struct csched_vcpu *svc = vdata;
+    struct csched_unit *svc = vdata;
 
     ASSERT(svc && is_idle_vcpu(svc->vcpu));
 
@@ -660,7 +660,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 static inline void
 __csched_vcpu_check(struct vcpu *vc)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(vc->sched_unit);
     struct csched_dom * const sdom = svc->sdom;
 
     BUG_ON( svc->vcpu != vc );
@@ -862,7 +862,7 @@ static struct sched_resource *
 csched_res_pick(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched_vcpu *svc = CSCHED_VCPU(vc);
+    struct csched_unit *svc = CSCHED_UNIT(unit);
 
     /*
      * We have been called by vcpu_migrate() (in schedule.c), as part
@@ -876,7 +876,7 @@ csched_res_pick(const struct scheduler *ops, struct sched_unit *unit)
 }
 
 static inline void
-__csched_vcpu_acct_start(struct csched_private *prv, struct csched_vcpu *svc)
+__csched_vcpu_acct_start(struct csched_private *prv, struct csched_unit *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
     unsigned long flags;
@@ -906,7 +906,7 @@ __csched_vcpu_acct_start(struct csched_private *prv, struct csched_vcpu *svc)
 
 static inline void
 __csched_vcpu_acct_stop_locked(struct csched_private *prv,
-    struct csched_vcpu *svc)
+    struct csched_unit *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
 
@@ -931,7 +931,7 @@ __csched_vcpu_acct_stop_locked(struct csched_private *prv,
 static void
 csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(current);
+    struct csched_unit * const svc = CSCHED_UNIT(current->sched_unit);
     const struct scheduler *ops = per_cpu(scheduler, cpu);
 
     ASSERT( current->processor == cpu );
@@ -1000,10 +1000,10 @@ csched_alloc_vdata(const struct scheduler *ops, struct sched_unit *unit,
                    void *dd)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched_vcpu *svc;
+    struct csched_unit *svc;
 
     /* Allocate per-VCPU info */
-    svc = xzalloc(struct csched_vcpu);
+    svc = xzalloc(struct csched_unit);
     if ( svc == NULL )
         return NULL;
 
@@ -1022,7 +1022,7 @@ static void
 csched_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched_vcpu *svc = unit->priv;
+    struct csched_unit *svc = unit->priv;
     spinlock_t *lock;
 
     BUG_ON( is_idle_vcpu(vc) );
@@ -1048,7 +1048,7 @@ csched_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 static void
 csched_free_vdata(const struct scheduler *ops, void *priv)
 {
-    struct csched_vcpu *svc = priv;
+    struct csched_unit *svc = priv;
 
     BUG_ON( !list_empty(&svc->runq_elem) );
 
@@ -1059,8 +1059,7 @@ static void
 csched_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched_private *prv = CSCHED_PRIV(ops);
-    struct vcpu *vc = unit->vcpu;
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(unit);
     struct csched_dom * const sdom = svc->sdom;
 
     SCHED_STAT_CRANK(vcpu_remove);
@@ -1087,14 +1086,14 @@ static void
 csched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(unit);
     unsigned int cpu = vc->processor;
 
     SCHED_STAT_CRANK(vcpu_sleep);
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( curr_on_cpu(cpu) == vc )
+    if ( curr_on_cpu(cpu) == unit )
     {
         /*
          * We are about to tickle cpu, so we should clear its bit in idlers.
@@ -1112,12 +1111,12 @@ static void
 csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(unit);
     bool_t migrating;
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( unlikely(curr_on_cpu(vc->processor) == vc) )
+    if ( unlikely(curr_on_cpu(vc->processor) == unit) )
     {
         SCHED_STAT_CRANK(vcpu_wake_running);
         return;
@@ -1173,8 +1172,7 @@ csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 static void
 csched_unit_yield(const struct scheduler *ops, struct sched_unit *unit)
 {
-    struct vcpu *vc = unit->vcpu;
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(unit);
 
     /* Let the scheduler know that this vcpu is trying to yield */
     set_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags);
@@ -1229,8 +1227,7 @@ static void
 csched_aff_cntl(const struct scheduler *ops, struct sched_unit *unit,
                 const cpumask_t *hard, const cpumask_t *soft)
 {
-    struct vcpu *v = unit->vcpu;
-    struct csched_vcpu *svc = CSCHED_VCPU(v);
+    struct csched_unit *svc = CSCHED_UNIT(unit);
 
     if ( !hard )
         return;
@@ -1333,7 +1330,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
 {
     struct csched_pcpu * const spc = CSCHED_PCPU(cpu);
     struct list_head *runq, *elem, *next, *last_under;
-    struct csched_vcpu *svc_elem;
+    struct csched_unit *svc_elem;
     spinlock_t *lock;
     unsigned long flags;
     int sort_epoch;
@@ -1379,7 +1376,7 @@ csched_acct(void* dummy)
     unsigned long flags;
     struct list_head *iter_vcpu, *next_vcpu;
     struct list_head *iter_sdom, *next_sdom;
-    struct csched_vcpu *svc;
+    struct csched_unit *svc;
     struct csched_dom *sdom;
     uint32_t credit_total;
     uint32_t weight_total;
@@ -1502,7 +1499,7 @@ csched_acct(void* dummy)
 
         list_for_each_safe( iter_vcpu, next_vcpu, &sdom->active_vcpu )
         {
-            svc = list_entry(iter_vcpu, struct csched_vcpu, active_vcpu_elem);
+            svc = list_entry(iter_vcpu, struct csched_unit, active_vcpu_elem);
             BUG_ON( sdom != svc->sdom );
 
             /* Increment credit */
@@ -1606,12 +1603,12 @@ csched_tick(void *_cpu)
     set_timer(&spc->ticker, NOW() + MICROSECS(prv->tick_period_us) );
 }
 
-static struct csched_vcpu *
+static struct csched_unit *
 csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
 {
     const struct csched_private * const prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
     const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
-    struct csched_vcpu *speer;
+    struct csched_unit *speer;
     struct list_head *iter;
     struct vcpu *vc;
 
@@ -1621,7 +1618,7 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
      * Don't steal from an idle CPU's runq because it's about to
      * pick up work from it itself.
      */
-    if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu))) )
+    if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu)->vcpu)) )
         goto out;
 
     list_for_each( iter, &peer_pcpu->runq )
@@ -1683,12 +1680,12 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
     return NULL;
 }
 
-static struct csched_vcpu *
+static struct csched_unit *
 csched_load_balance(struct csched_private *prv, int cpu,
-    struct csched_vcpu *snext, bool_t *stolen)
+    struct csched_unit *snext, bool_t *stolen)
 {
     struct cpupool *c = per_cpu(cpupool, cpu);
-    struct csched_vcpu *speer;
+    struct csched_unit *speer;
     cpumask_t workers;
     cpumask_t *online;
     int peer_cpu, first_cpu, peer_node, bstep;
@@ -1837,9 +1834,9 @@ csched_schedule(
 {
     const int cpu = smp_processor_id();
     struct list_head * const runq = RUNQ(cpu);
-    struct csched_vcpu * const scurr = CSCHED_VCPU(current);
+    struct csched_unit * const scurr = CSCHED_UNIT(current->sched_unit);
     struct csched_private *prv = CSCHED_PRIV(ops);
-    struct csched_vcpu *snext;
+    struct csched_unit *snext;
     struct task_slice ret;
     s_time_t runtime, tslice;
 
@@ -1955,7 +1952,7 @@ csched_schedule(
     if ( tasklet_work_scheduled )
     {
         TRACE_0D(TRC_CSCHED_SCHED_TASKLET);
-        snext = CSCHED_VCPU(idle_vcpu[cpu]);
+        snext = CSCHED_UNIT(idle_vcpu[cpu]->sched_unit);
         snext->pri = CSCHED_PRI_TS_BOOST;
     }
 
@@ -2007,7 +2004,7 @@ out:
 }
 
 static void
-csched_dump_vcpu(struct csched_vcpu *svc)
+csched_dump_vcpu(struct csched_unit *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
 
@@ -2043,7 +2040,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
     struct list_head *runq, *iter;
     struct csched_private *prv = CSCHED_PRIV(ops);
     struct csched_pcpu *spc;
-    struct csched_vcpu *svc;
+    struct csched_unit *svc;
     spinlock_t *lock;
     unsigned long flags;
     int loop;
@@ -2067,7 +2064,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
            nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
 
     /* current VCPU (nothing to say if that's the idle vcpu). */
-    svc = CSCHED_VCPU(curr_on_cpu(cpu));
+    svc = CSCHED_UNIT(curr_on_cpu(cpu));
     if ( svc && !is_idle_vcpu(svc->vcpu) )
     {
         printk("\trun: ");
@@ -2136,10 +2133,10 @@ csched_dump(const struct scheduler *ops)
 
         list_for_each( iter_svc, &sdom->active_vcpu )
         {
-            struct csched_vcpu *svc;
+            struct csched_unit *svc;
             spinlock_t *lock;
 
-            svc = list_entry(iter_svc, struct csched_vcpu, active_vcpu_elem);
+            svc = list_entry(iter_svc, struct csched_unit, active_vcpu_elem);
             lock = vcpu_schedule_lock(svc->vcpu);
 
             printk("\t%3d: ", ++loop);
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index b71802bba2..36201815e3 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -176,7 +176,7 @@
  *     load balancing;
  *  + serializes runqueue operations (removing and inserting vcpus);
  *  + protects runqueue-wide data in csched2_runqueue_data;
- *  + protects vcpu parameters in csched2_vcpu for the vcpu in the
+ *  + protects vcpu parameters in csched2_unit for the vcpu in the
  *    runqueue.
  *
  * - Private scheduler lock
@@ -511,7 +511,7 @@ struct csched2_pcpu {
 /*
  * Virtual CPU
  */
-struct csched2_vcpu {
+struct csched2_unit {
     struct csched2_dom *sdom;          /* Up-pointer to domain                */
     struct vcpu *vcpu;                 /* Up-pointer, to vcpu                 */
     struct csched2_runqueue_data *rqd; /* Up-pointer to the runqueue          */
@@ -570,9 +570,9 @@ static inline struct csched2_pcpu *csched2_pcpu(unsigned int cpu)
     return per_cpu(schedule_data, cpu).sched_priv;
 }
 
-static inline struct csched2_vcpu *csched2_vcpu(const struct vcpu *v)
+static inline struct csched2_unit *csched2_unit(const struct sched_unit *unit)
 {
-    return v->sched_unit->priv;
+    return unit->priv;
 }
 
 static inline struct csched2_dom *csched2_dom(const struct domain *d)
@@ -594,7 +594,7 @@ static inline struct csched2_runqueue_data *c2rqd(const struct scheduler *ops,
 }
 
 /* Does the domain of this vCPU have a cap? */
-static inline bool has_cap(const struct csched2_vcpu *svc)
+static inline bool has_cap(const struct csched2_unit *svc)
 {
     return svc->budget != STIME_MAX;
 }
@@ -688,7 +688,7 @@ void smt_idle_mask_clear(unsigned int cpu, cpumask_t *mask)
  * Of course, 1, 2 and 3 makes sense only if svc has a soft affinity. Also
  * note that at least 5 is guaranteed to _always_ return at least one pcpu.
  */
-static int get_fallback_cpu(struct csched2_vcpu *svc)
+static int get_fallback_cpu(struct csched2_unit *svc)
 {
     struct vcpu *v = svc->vcpu;
     unsigned int bs;
@@ -773,7 +773,7 @@ static int get_fallback_cpu(struct csched2_vcpu *svc)
  * FIXME: Do pre-calculated division?
  */
 static void t2c_update(struct csched2_runqueue_data *rqd, s_time_t time,
-                          struct csched2_vcpu *svc)
+                          struct csched2_unit *svc)
 {
     uint64_t val = time * rqd->max_weight + svc->residual;
 
@@ -781,7 +781,7 @@ static void t2c_update(struct csched2_runqueue_data *rqd, s_time_t time,
     svc->credit -= val;
 }
 
-static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct csched2_vcpu *svc)
+static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct csched2_unit *svc)
 {
     return credit * svc->weight / rqd->max_weight;
 }
@@ -790,14 +790,14 @@ static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct c
  * Runqueue related code.
  */
 
-static inline int vcpu_on_runq(struct csched2_vcpu *svc)
+static inline int vcpu_on_runq(struct csched2_unit *svc)
 {
     return !list_empty(&svc->runq_elem);
 }
 
-static inline struct csched2_vcpu * runq_elem(struct list_head *elem)
+static inline struct csched2_unit * runq_elem(struct list_head *elem)
 {
-    return list_entry(elem, struct csched2_vcpu, runq_elem);
+    return list_entry(elem, struct csched2_unit, runq_elem);
 }
 
 static void activate_runqueue(struct csched2_private *prv, int rqi)
@@ -915,7 +915,7 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
 
         list_for_each( iter, &rqd->svc )
         {
-            struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, rqd_elem);
+            struct csched2_unit * svc = list_entry(iter, struct csched2_unit, rqd_elem);
 
             if ( svc->weight > max_weight )
                 max_weight = svc->weight;
@@ -940,7 +940,7 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
 
 /* Add and remove from runqueue assignment (not active run queue) */
 static void
-_runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
+_runq_assign(struct csched2_unit *svc, struct csched2_runqueue_data *rqd)
 {
 
     svc->rqd = rqd;
@@ -970,7 +970,7 @@ _runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
 static void
 runq_assign(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched2_vcpu *svc = vc->sched_unit->priv;
+    struct csched2_unit *svc = vc->sched_unit->priv;
 
     ASSERT(svc->rqd == NULL);
 
@@ -978,7 +978,7 @@ runq_assign(const struct scheduler *ops, struct vcpu *vc)
 }
 
 static void
-_runq_deassign(struct csched2_vcpu *svc)
+_runq_deassign(struct csched2_unit *svc)
 {
     struct csched2_runqueue_data *rqd = svc->rqd;
 
@@ -997,7 +997,7 @@ _runq_deassign(struct csched2_vcpu *svc)
 static void
 runq_deassign(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched2_vcpu *svc = vc->sched_unit->priv;
+    struct csched2_unit *svc = vc->sched_unit->priv;
 
     ASSERT(svc->rqd == c2rqd(ops, vc->processor));
 
@@ -1199,7 +1199,7 @@ update_runq_load(const struct scheduler *ops,
 
 static void
 update_svc_load(const struct scheduler *ops,
-                struct csched2_vcpu *svc, int change, s_time_t now)
+                struct csched2_unit *svc, int change, s_time_t now)
 {
     struct csched2_private *prv = csched2_priv(ops);
     s_time_t delta, vcpu_load;
@@ -1259,7 +1259,7 @@ update_svc_load(const struct scheduler *ops,
 static void
 update_load(const struct scheduler *ops,
             struct csched2_runqueue_data *rqd,
-            struct csched2_vcpu *svc, int change, s_time_t now)
+            struct csched2_unit *svc, int change, s_time_t now)
 {
     trace_var(TRC_CSCHED2_UPDATE_LOAD, 1, 0,  NULL);
 
@@ -1269,7 +1269,7 @@ update_load(const struct scheduler *ops,
 }
 
 static void
-runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
+runq_insert(const struct scheduler *ops, struct csched2_unit *svc)
 {
     struct list_head *iter;
     unsigned int cpu = svc->vcpu->processor;
@@ -1288,7 +1288,7 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
 
     list_for_each( iter, runq )
     {
-        struct csched2_vcpu * iter_svc = runq_elem(iter);
+        struct csched2_unit * iter_svc = runq_elem(iter);
 
         if ( svc->credit > iter_svc->credit )
             break;
@@ -1312,13 +1312,13 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
     }
 }
 
-static inline void runq_remove(struct csched2_vcpu *svc)
+static inline void runq_remove(struct csched2_unit *svc)
 {
     ASSERT(vcpu_on_runq(svc));
     list_del_init(&svc->runq_elem);
 }
 
-void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_vcpu *, s_time_t);
+void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_unit *, s_time_t);
 
 static inline void
 tickle_cpu(unsigned int cpu, struct csched2_runqueue_data *rqd)
@@ -1334,7 +1334,7 @@ tickle_cpu(unsigned int cpu, struct csched2_runqueue_data *rqd)
  * whether or not it already run for more than the ratelimit, to which we
  * apply some tolerance).
  */
-static inline bool is_preemptable(const struct csched2_vcpu *svc,
+static inline bool is_preemptable(const struct csched2_unit *svc,
                                     s_time_t now, s_time_t ratelimit)
 {
     if ( ratelimit <= CSCHED2_RATELIMIT_TICKLE_TOLERANCE )
@@ -1360,10 +1360,10 @@ static inline bool is_preemptable(const struct csched2_vcpu *svc,
  * Within the same class, the highest difference of credit.
  */
 static s_time_t tickle_score(const struct scheduler *ops, s_time_t now,
-                             struct csched2_vcpu *new, unsigned int cpu)
+                             struct csched2_unit *new, unsigned int cpu)
 {
     struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
-    struct csched2_vcpu * cur = csched2_vcpu(curr_on_cpu(cpu));
+    struct csched2_unit * cur = csched2_unit(curr_on_cpu(cpu));
     struct csched2_private *prv = csched2_priv(ops);
     s_time_t score;
 
@@ -1432,7 +1432,7 @@ static s_time_t tickle_score(const struct scheduler *ops, s_time_t now,
  * pick up some work, so it would be wrong to consider it idle.
  */
 static void
-runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
+runq_tickle(const struct scheduler *ops, struct csched2_unit *new, s_time_t now)
 {
     int i, ipid = -1;
     s_time_t max = 0;
@@ -1587,7 +1587,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
         return;
     }
 
-    ASSERT(!is_idle_vcpu(curr_on_cpu(ipid)));
+    ASSERT(!is_idle_vcpu(curr_on_cpu(ipid)->vcpu));
     SCHED_STAT_CRANK(tickled_busy_cpu);
  tickle:
     BUG_ON(ipid == -1);
@@ -1614,7 +1614,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
  * Credit-related code
  */
 static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
-                         struct csched2_vcpu *snext)
+                         struct csched2_unit *snext)
 {
     struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
     struct list_head *iter;
@@ -1644,10 +1644,10 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
     list_for_each( iter, &rqd->svc )
     {
         unsigned int svc_cpu;
-        struct csched2_vcpu * svc;
+        struct csched2_unit * svc;
         int start_credit;
 
-        svc = list_entry(iter, struct csched2_vcpu, rqd_elem);
+        svc = list_entry(iter, struct csched2_unit, rqd_elem);
         svc_cpu = svc->vcpu->processor;
 
         ASSERT(!is_idle_vcpu(svc->vcpu));
@@ -1657,7 +1657,7 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
          * If svc is running, it is our responsibility to make sure, here,
          * that the credit it has spent so far get accounted.
          */
-        if ( svc->vcpu == curr_on_cpu(svc_cpu) )
+        if ( svc->vcpu == curr_on_cpu(svc_cpu)->vcpu )
         {
             burn_credits(rqd, svc, now);
             /*
@@ -1709,11 +1709,11 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
 }
 
 void burn_credits(struct csched2_runqueue_data *rqd,
-                  struct csched2_vcpu *svc, s_time_t now)
+                  struct csched2_unit *svc, s_time_t now)
 {
     s_time_t delta;
 
-    ASSERT(svc == csched2_vcpu(curr_on_cpu(svc->vcpu->processor)));
+    ASSERT(svc == csched2_unit(curr_on_cpu(svc->vcpu->processor)));
 
     if ( unlikely(is_idle_vcpu(svc->vcpu)) )
     {
@@ -1763,7 +1763,7 @@ void burn_credits(struct csched2_runqueue_data *rqd,
  * Budget-related code.
  */
 
-static void park_vcpu(struct csched2_vcpu *svc)
+static void park_vcpu(struct csched2_unit *svc)
 {
     struct vcpu *v = svc->vcpu;
 
@@ -1792,7 +1792,7 @@ static void park_vcpu(struct csched2_vcpu *svc)
     list_add(&svc->parked_elem, &svc->sdom->parked_vcpus);
 }
 
-static bool vcpu_grab_budget(struct csched2_vcpu *svc)
+static bool vcpu_grab_budget(struct csched2_unit *svc)
 {
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
@@ -1839,7 +1839,7 @@ static bool vcpu_grab_budget(struct csched2_vcpu *svc)
 }
 
 static void
-vcpu_return_budget(struct csched2_vcpu *svc, struct list_head *parked)
+vcpu_return_budget(struct csched2_unit *svc, struct list_head *parked)
 {
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
@@ -1882,7 +1882,7 @@ vcpu_return_budget(struct csched2_vcpu *svc, struct list_head *parked)
 static void
 unpark_parked_vcpus(const struct scheduler *ops, struct list_head *vcpus)
 {
-    struct csched2_vcpu *svc, *tmp;
+    struct csched2_unit *svc, *tmp;
     spinlock_t *lock;
 
     list_for_each_entry_safe(svc, tmp, vcpus, parked_elem)
@@ -2004,7 +2004,7 @@ static void replenish_domain_budget(void* data)
 static inline void
 csched2_vcpu_check(struct vcpu *vc)
 {
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(vc->sched_unit);
     struct csched2_dom * const sdom = svc->sdom;
 
     BUG_ON( svc->vcpu != vc );
@@ -2030,10 +2030,10 @@ csched2_alloc_vdata(const struct scheduler *ops, struct sched_unit *unit,
                     void *dd)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched2_vcpu *svc;
+    struct csched2_unit *svc;
 
     /* Allocate per-VCPU info */
-    svc = xzalloc(struct csched2_vcpu);
+    svc = xzalloc(struct csched2_unit);
     if ( svc == NULL )
         return NULL;
 
@@ -2074,12 +2074,12 @@ static void
 csched2_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
 
     ASSERT(!is_idle_vcpu(vc));
     SCHED_STAT_CRANK(vcpu_sleep);
 
-    if ( curr_on_cpu(vc->processor) == vc )
+    if ( curr_on_cpu(vc->processor) == unit )
     {
         tickle_cpu(vc->processor, svc->rqd);
     }
@@ -2097,7 +2097,7 @@ static void
 csched2_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
     unsigned int cpu = vc->processor;
     s_time_t now;
 
@@ -2105,7 +2105,7 @@ csched2_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 
     ASSERT(!is_idle_vcpu(vc));
 
-    if ( unlikely(curr_on_cpu(cpu) == vc) )
+    if ( unlikely(curr_on_cpu(cpu) == unit) )
     {
         SCHED_STAT_CRANK(vcpu_wake_running);
         goto out;
@@ -2152,8 +2152,7 @@ out:
 static void
 csched2_unit_yield(const struct scheduler *ops, struct sched_unit *unit)
 {
-    struct vcpu *v = unit->vcpu;
-    struct csched2_vcpu * const svc = csched2_vcpu(v);
+    struct csched2_unit * const svc = csched2_unit(unit);
 
     __set_bit(__CSFLAG_vcpu_yield, &svc->flags);
 }
@@ -2162,7 +2161,7 @@ static void
 csched2_context_saved(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
     spinlock_t *lock = vcpu_schedule_lock_irq(vc);
     s_time_t now = NOW();
     LIST_HEAD(were_parked);
@@ -2208,7 +2207,7 @@ csched2_res_pick(const struct scheduler *ops, struct sched_unit *unit)
     struct vcpu *vc = unit->vcpu;
     int i, min_rqi = -1, min_s_rqi = -1;
     unsigned int new_cpu, cpu = vc->processor;
-    struct csched2_vcpu *svc = csched2_vcpu(vc);
+    struct csched2_unit *svc = csched2_unit(unit);
     s_time_t min_avgload = MAX_LOAD, min_s_avgload = MAX_LOAD;
     bool has_soft;
 
@@ -2430,15 +2429,15 @@ csched2_res_pick(const struct scheduler *ops, struct sched_unit *unit)
 typedef struct {
     /* NB: Modified by consider() */
     s_time_t load_delta;
-    struct csched2_vcpu * best_push_svc, *best_pull_svc;
+    struct csched2_unit * best_push_svc, *best_pull_svc;
     /* NB: Read by consider() */
     struct csched2_runqueue_data *lrqd;
     struct csched2_runqueue_data *orqd;                  
 } balance_state_t;
 
 static void consider(balance_state_t *st, 
-                     struct csched2_vcpu *push_svc,
-                     struct csched2_vcpu *pull_svc)
+                     struct csched2_unit *push_svc,
+                     struct csched2_unit *pull_svc)
 {
     s_time_t l_load, o_load, delta;
 
@@ -2471,8 +2470,8 @@ static void consider(balance_state_t *st,
 
 
 static void migrate(const struct scheduler *ops,
-                    struct csched2_vcpu *svc, 
-                    struct csched2_runqueue_data *trqd, 
+                    struct csched2_unit *svc,
+                    struct csched2_runqueue_data *trqd,
                     s_time_t now)
 {
     int cpu = svc->vcpu->processor;
@@ -2541,7 +2540,7 @@ static void migrate(const struct scheduler *ops,
  *  - svc is not already flagged to migrate,
  *  - if svc is allowed to run on at least one of the pcpus of rqd.
  */
-static bool vcpu_is_migrateable(struct csched2_vcpu *svc,
+static bool vcpu_is_migrateable(struct csched2_unit *svc,
                                   struct csched2_runqueue_data *rqd)
 {
     struct vcpu *v = svc->vcpu;
@@ -2691,7 +2690,7 @@ retry:
     /* Reuse load delta (as we're trying to minimize it) */
     list_for_each( push_iter, &st.lrqd->svc )
     {
-        struct csched2_vcpu * push_svc = list_entry(push_iter, struct csched2_vcpu, rqd_elem);
+        struct csched2_unit * push_svc = list_entry(push_iter, struct csched2_unit, rqd_elem);
 
         update_svc_load(ops, push_svc, 0, now);
 
@@ -2700,7 +2699,7 @@ retry:
 
         list_for_each( pull_iter, &st.orqd->svc )
         {
-            struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem);
+            struct csched2_unit * pull_svc = list_entry(pull_iter, struct csched2_unit, rqd_elem);
             
             if ( !inner_load_updated )
                 update_svc_load(ops, pull_svc, 0, now);
@@ -2719,7 +2718,7 @@ retry:
 
     list_for_each( pull_iter, &st.orqd->svc )
     {
-        struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem);
+        struct csched2_unit * pull_svc = list_entry(pull_iter, struct csched2_unit, rqd_elem);
         
         if ( !vcpu_is_migrateable(pull_svc, st.lrqd) )
             continue;
@@ -2746,7 +2745,7 @@ csched2_unit_migrate(
 {
     struct vcpu *vc = unit->vcpu;
     struct domain *d = vc->domain;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
     struct csched2_runqueue_data *trqd;
     s_time_t now = NOW();
 
@@ -2847,7 +2846,7 @@ csched2_dom_cntl(
             /* Update weights for vcpus, and max_weight for runqueues on which they reside */
             for_each_vcpu ( d, v )
             {
-                struct csched2_vcpu *svc = csched2_vcpu(v);
+                struct csched2_unit *svc = csched2_unit(v->sched_unit);
                 spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
 
                 ASSERT(svc->rqd == c2rqd(ops, svc->vcpu->processor));
@@ -2861,7 +2860,7 @@ csched2_dom_cntl(
         /* Cap */
         if ( op->u.credit2.cap != 0 )
         {
-            struct csched2_vcpu *svc;
+            struct csched2_unit *svc;
             spinlock_t *lock;
 
             /* Cap is only valid if it's below 100 * nr_of_vCPUS */
@@ -2885,7 +2884,7 @@ csched2_dom_cntl(
              */
             for_each_vcpu ( d, v )
             {
-                svc = csched2_vcpu(v);
+                svc = csched2_unit(v->sched_unit);
                 lock = vcpu_schedule_lock(svc->vcpu);
                 /*
                  * Too small quotas would in theory cause a lot of overhead,
@@ -2928,14 +2927,14 @@ csched2_dom_cntl(
                  */
                 for_each_vcpu ( d, v )
                 {
-                    svc = csched2_vcpu(v);
+                    svc = csched2_unit(v->sched_unit);
                     lock = vcpu_schedule_lock(svc->vcpu);
                     if ( v->is_running )
                     {
                         unsigned int cpu = v->processor;
                         struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
 
-                        ASSERT(curr_on_cpu(cpu) == v);
+                        ASSERT(curr_on_cpu(cpu)->vcpu == v);
 
                         /*
                          * We are triggering a reschedule on the vCPU's
@@ -2975,7 +2974,7 @@ csched2_dom_cntl(
             /* Disable budget accounting for all the vCPUs. */
             for_each_vcpu ( d, v )
             {
-                struct csched2_vcpu *svc = csched2_vcpu(v);
+                struct csched2_unit *svc = csched2_unit(v->sched_unit);
                 spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
 
                 svc->budget = STIME_MAX;
@@ -3012,8 +3011,7 @@ static void
 csched2_aff_cntl(const struct scheduler *ops, struct sched_unit *unit,
                  const cpumask_t *hard, const cpumask_t *soft)
 {
-    struct vcpu *v = unit->vcpu;
-    struct csched2_vcpu *svc = csched2_vcpu(v);
+    struct csched2_unit *svc = csched2_unit(unit);
 
     if ( !hard )
         return;
@@ -3113,7 +3111,7 @@ static void
 csched2_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched2_vcpu *svc = unit->priv;
+    struct csched2_unit *svc = unit->priv;
     struct csched2_dom * const sdom = svc->sdom;
     spinlock_t *lock;
 
@@ -3145,7 +3143,7 @@ csched2_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 static void
 csched2_free_vdata(const struct scheduler *ops, void *priv)
 {
-    struct csched2_vcpu *svc = priv;
+    struct csched2_unit *svc = priv;
 
     xfree(svc);
 }
@@ -3154,7 +3152,7 @@ static void
 csched2_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
     spinlock_t *lock;
 
     ASSERT(!is_idle_vcpu(vc));
@@ -3175,7 +3173,7 @@ csched2_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 /* How long should we let this vcpu run for? */
 static s_time_t
 csched2_runtime(const struct scheduler *ops, int cpu,
-                struct csched2_vcpu *snext, s_time_t now)
+                struct csched2_unit *snext, s_time_t now)
 {
     s_time_t time, min_time;
     int rt_credit; /* Proposed runtime measured in credits */
@@ -3220,7 +3218,7 @@ csched2_runtime(const struct scheduler *ops, int cpu,
      */
     if ( ! list_empty(runq) )
     {
-        struct csched2_vcpu *swait = runq_elem(runq->next);
+        struct csched2_unit *swait = runq_elem(runq->next);
 
         if ( ! is_idle_vcpu(swait->vcpu)
              && swait->credit > 0 )
@@ -3271,14 +3269,14 @@ csched2_runtime(const struct scheduler *ops, int cpu,
 /*
  * Find a candidate.
  */
-static struct csched2_vcpu *
+static struct csched2_unit *
 runq_candidate(struct csched2_runqueue_data *rqd,
-               struct csched2_vcpu *scurr,
+               struct csched2_unit *scurr,
                int cpu, s_time_t now,
                unsigned int *skipped)
 {
     struct list_head *iter, *temp;
-    struct csched2_vcpu *snext = NULL;
+    struct csched2_unit *snext = NULL;
     struct csched2_private *prv = csched2_priv(per_cpu(scheduler, cpu));
     bool yield = false, soft_aff_preempt = false;
 
@@ -3359,12 +3357,12 @@ runq_candidate(struct csched2_runqueue_data *rqd,
     if ( vcpu_runnable(scurr->vcpu) && !soft_aff_preempt )
         snext = scurr;
     else
-        snext = csched2_vcpu(idle_vcpu[cpu]);
+        snext = csched2_unit(idle_vcpu[cpu]->sched_unit);
 
  check_runq:
     list_for_each_safe( iter, temp, &rqd->runq )
     {
-        struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, runq_elem);
+        struct csched2_unit * svc = list_entry(iter, struct csched2_unit, runq_elem);
 
         if ( unlikely(tb_init_done) )
         {
@@ -3463,8 +3461,8 @@ csched2_schedule(
 {
     const int cpu = smp_processor_id();
     struct csched2_runqueue_data *rqd;
-    struct csched2_vcpu * const scurr = csched2_vcpu(current);
-    struct csched2_vcpu *snext = NULL;
+    struct csched2_unit * const scurr = csched2_unit(current->sched_unit);
+    struct csched2_unit *snext = NULL;
     unsigned int skipped_vcpus = 0;
     struct task_slice ret;
     bool tickled;
@@ -3540,7 +3538,7 @@ csched2_schedule(
     {
         __clear_bit(__CSFLAG_vcpu_yield, &scurr->flags);
         trace_var(TRC_CSCHED2_SCHED_TASKLET, 1, 0, NULL);
-        snext = csched2_vcpu(idle_vcpu[cpu]);
+        snext = csched2_unit(idle_vcpu[cpu]->sched_unit);
     }
     else
         snext = runq_candidate(rqd, scurr, cpu, now, &skipped_vcpus);
@@ -3643,7 +3641,7 @@ csched2_schedule(
 }
 
 static void
-csched2_dump_vcpu(struct csched2_private *prv, struct csched2_vcpu *svc)
+csched2_dump_vcpu(struct csched2_private *prv, struct csched2_unit *svc)
 {
     printk("[%i.%i] flags=%x cpu=%i",
             svc->vcpu->domain->domain_id,
@@ -3667,7 +3665,7 @@ static inline void
 dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct csched2_private *prv = csched2_priv(ops);
-    struct csched2_vcpu *svc;
+    struct csched2_unit *svc;
 
     printk("CPU[%02d] runq=%d, sibling=%*pb, core=%*pb\n",
            cpu, c2r(cpu),
@@ -3675,7 +3673,7 @@ dump_pcpu(const struct scheduler *ops, int cpu)
            nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
 
     /* current VCPU (nothing to say if that's the idle vcpu) */
-    svc = csched2_vcpu(curr_on_cpu(cpu));
+    svc = csched2_unit(curr_on_cpu(cpu));
     if ( svc && !is_idle_vcpu(svc->vcpu) )
     {
         printk("\trun: ");
@@ -3748,7 +3746,7 @@ csched2_dump(const struct scheduler *ops)
 
         for_each_vcpu( sdom->dom, v )
         {
-            struct csched2_vcpu * const svc = csched2_vcpu(v);
+            struct csched2_unit * const svc = csched2_unit(v->sched_unit);
             spinlock_t *lock;
 
             lock = vcpu_schedule_lock(svc->vcpu);
@@ -3777,7 +3775,7 @@ csched2_dump(const struct scheduler *ops)
         printk("RUNQ:\n");
         list_for_each( iter, runq )
         {
-            struct csched2_vcpu *svc = runq_elem(iter);
+            struct csched2_unit *svc = runq_elem(iter);
 
             if ( svc )
             {
@@ -3878,7 +3876,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                      void *pdata, void *vdata)
 {
     struct csched2_private *prv = csched2_priv(new_ops);
-    struct csched2_vcpu *svc = vdata;
+    struct csched2_unit *svc = vdata;
     unsigned rqi;
 
     ASSERT(pdata && svc && is_idle_vcpu(svc->vcpu));
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index 1ea8643a9c..defcdfcf1e 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -94,7 +94,7 @@ DEFINE_PER_CPU(struct null_pcpu, npc);
 /*
  * Virtual CPU
  */
-struct null_vcpu {
+struct null_unit {
     struct list_head waitq_elem;
     struct vcpu *vcpu;
 };
@@ -115,9 +115,9 @@ static inline struct null_private *null_priv(const struct scheduler *ops)
     return ops->sched_data;
 }
 
-static inline struct null_vcpu *null_vcpu(const struct vcpu *v)
+static inline struct null_unit *null_unit(const struct sched_unit *unit)
 {
-    return v->sched_unit->priv;
+    return unit->priv;
 }
 
 static inline bool vcpu_check_affinity(struct vcpu *v, unsigned int cpu,
@@ -197,9 +197,9 @@ static void *null_alloc_vdata(const struct scheduler *ops,
                               struct sched_unit *unit, void *dd)
 {
     struct vcpu *v = unit->vcpu;
-    struct null_vcpu *nvc;
+    struct null_unit *nvc;
 
-    nvc = xzalloc(struct null_vcpu);
+    nvc = xzalloc(struct null_unit);
     if ( nvc == NULL )
         return NULL;
 
@@ -213,7 +213,7 @@ static void *null_alloc_vdata(const struct scheduler *ops,
 
 static void null_free_vdata(const struct scheduler *ops, void *priv)
 {
-    struct null_vcpu *nvc = priv;
+    struct null_unit *nvc = priv;
 
     xfree(nvc);
 }
@@ -391,7 +391,7 @@ static spinlock_t *null_switch_sched(struct scheduler *new_ops,
 {
     struct schedule_data *sd = &per_cpu(schedule_data, cpu);
     struct null_private *prv = null_priv(new_ops);
-    struct null_vcpu *nvc = vdata;
+    struct null_unit *nvc = vdata;
 
     ASSERT(nvc && is_idle_vcpu(nvc->vcpu));
 
@@ -414,7 +414,7 @@ static void null_unit_insert(const struct scheduler *ops,
 {
     struct vcpu *v = unit->vcpu;
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *nvc = null_vcpu(v);
+    struct null_unit *nvc = null_unit(unit);
     unsigned int cpu;
     spinlock_t *lock;
 
@@ -471,9 +471,9 @@ static void _vcpu_remove(struct null_private *prv, struct vcpu *v)
 {
     unsigned int bs;
     unsigned int cpu = v->processor;
-    struct null_vcpu *wvc;
+    struct null_unit *wvc;
 
-    ASSERT(list_empty(&null_vcpu(v)->waitq_elem));
+    ASSERT(list_empty(&null_unit(v->sched_unit)->waitq_elem));
 
     vcpu_deassign(prv, v, cpu);
 
@@ -509,7 +509,7 @@ static void null_unit_remove(const struct scheduler *ops,
 {
     struct vcpu *v = unit->vcpu;
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *nvc = null_vcpu(v);
+    struct null_unit *nvc = null_unit(unit);
     spinlock_t *lock;
 
     ASSERT(!is_idle_vcpu(v));
@@ -544,13 +544,13 @@ static void null_unit_wake(const struct scheduler *ops,
 
     ASSERT(!is_idle_vcpu(v));
 
-    if ( unlikely(curr_on_cpu(v->processor) == v) )
+    if ( unlikely(curr_on_cpu(v->processor) == unit) )
     {
         SCHED_STAT_CRANK(vcpu_wake_running);
         return;
     }
 
-    if ( unlikely(!list_empty(&null_vcpu(v)->waitq_elem)) )
+    if ( unlikely(!list_empty(&null_unit(unit)->waitq_elem)) )
     {
         /* Not exactly "on runq", but close enough for reusing the counter */
         SCHED_STAT_CRANK(vcpu_wake_onrunq);
@@ -574,7 +574,7 @@ static void null_unit_sleep(const struct scheduler *ops,
     ASSERT(!is_idle_vcpu(v));
 
     /* If v is not assigned to a pCPU, or is not running, no need to bother */
-    if ( curr_on_cpu(v->processor) == v )
+    if ( curr_on_cpu(v->processor) == unit )
         cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
 
     SCHED_STAT_CRANK(vcpu_sleep);
@@ -592,7 +592,7 @@ static void null_unit_migrate(const struct scheduler *ops,
 {
     struct vcpu *v = unit->vcpu;
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *nvc = null_vcpu(v);
+    struct null_unit *nvc = null_unit(unit);
 
     ASSERT(!is_idle_vcpu(v));
 
@@ -677,7 +677,7 @@ static void null_unit_migrate(const struct scheduler *ops,
 #ifndef NDEBUG
 static inline void null_vcpu_check(struct vcpu *v)
 {
-    struct null_vcpu * const nvc = null_vcpu(v);
+    struct null_unit * const nvc = null_unit(v->sched_unit);
     struct null_dom * const ndom = v->domain->sched_priv;
 
     BUG_ON(nvc->vcpu != v);
@@ -707,7 +707,7 @@ static struct task_slice null_schedule(const struct scheduler *ops,
     unsigned int bs;
     const unsigned int cpu = smp_processor_id();
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *wvc;
+    struct null_unit *wvc;
     struct task_slice ret;
 
     SCHED_STAT_CRANK(schedule);
@@ -790,7 +790,7 @@ static struct task_slice null_schedule(const struct scheduler *ops,
     return ret;
 }
 
-static inline void dump_vcpu(struct null_private *prv, struct null_vcpu *nvc)
+static inline void dump_vcpu(struct null_private *prv, struct null_unit *nvc)
 {
     printk("[%i.%i] pcpu=%d", nvc->vcpu->domain->domain_id,
             nvc->vcpu->vcpu_id, list_empty(&nvc->waitq_elem) ?
@@ -800,7 +800,7 @@ static inline void dump_vcpu(struct null_private *prv, struct null_vcpu *nvc)
 static void null_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *nvc;
+    struct null_unit *nvc;
     spinlock_t *lock;
     unsigned long flags;
 
@@ -815,7 +815,7 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
     printk("\n");
 
     /* current VCPU (nothing to say if that's the idle vcpu) */
-    nvc = null_vcpu(curr_on_cpu(cpu));
+    nvc = null_unit(curr_on_cpu(cpu));
     if ( nvc && !is_idle_vcpu(nvc->vcpu) )
     {
         printk("\trun: ");
@@ -849,7 +849,7 @@ static void null_dump(const struct scheduler *ops)
         printk("\tDomain: %d\n", ndom->dom->domain_id);
         for_each_vcpu( ndom->dom, v )
         {
-            struct null_vcpu * const nvc = null_vcpu(v);
+            struct null_unit * const nvc = null_unit(v->sched_unit);
             spinlock_t *lock;
 
             lock = vcpu_schedule_lock(nvc->vcpu);
@@ -867,7 +867,7 @@ static void null_dump(const struct scheduler *ops)
     spin_lock(&prv->waitq_lock);
     list_for_each( iter, &prv->waitq )
     {
-        struct null_vcpu *nvc = list_entry(iter, struct null_vcpu, waitq_elem);
+        struct null_unit *nvc = list_entry(iter, struct null_unit, waitq_elem);
 
         if ( loop++ != 0 )
             printk(", ");
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index b80bc02357..8caec5c5dc 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -195,7 +195,7 @@ struct rt_private {
 /*
  * Virtual CPU
  */
-struct rt_vcpu {
+struct rt_unit {
     struct list_head q_elem;     /* on the runq/depletedq list */
     struct list_head replq_elem; /* on the replenishment events list */
 
@@ -233,9 +233,9 @@ static inline struct rt_private *rt_priv(const struct scheduler *ops)
     return ops->sched_data;
 }
 
-static inline struct rt_vcpu *rt_vcpu(const struct vcpu *vcpu)
+static inline struct rt_unit *rt_unit(const struct sched_unit *unit)
 {
-    return vcpu->sched_unit->priv;
+    return unit->priv;
 }
 
 static inline struct list_head *rt_runq(const struct scheduler *ops)
@@ -253,7 +253,7 @@ static inline struct list_head *rt_replq(const struct scheduler *ops)
     return &rt_priv(ops)->replq;
 }
 
-static inline bool has_extratime(const struct rt_vcpu *svc)
+static inline bool has_extratime(const struct rt_unit *svc)
 {
     return svc->flags & RTDS_extratime;
 }
@@ -263,25 +263,25 @@ static inline bool has_extratime(const struct rt_vcpu *svc)
  * and the replenishment events queue.
  */
 static int
-vcpu_on_q(const struct rt_vcpu *svc)
+vcpu_on_q(const struct rt_unit *svc)
 {
    return !list_empty(&svc->q_elem);
 }
 
-static struct rt_vcpu *
+static struct rt_unit *
 q_elem(struct list_head *elem)
 {
-    return list_entry(elem, struct rt_vcpu, q_elem);
+    return list_entry(elem, struct rt_unit, q_elem);
 }
 
-static struct rt_vcpu *
+static struct rt_unit *
 replq_elem(struct list_head *elem)
 {
-    return list_entry(elem, struct rt_vcpu, replq_elem);
+    return list_entry(elem, struct rt_unit, replq_elem);
 }
 
 static int
-vcpu_on_replq(const struct rt_vcpu *svc)
+vcpu_on_replq(const struct rt_unit *svc)
 {
     return !list_empty(&svc->replq_elem);
 }
@@ -291,7 +291,7 @@ vcpu_on_replq(const struct rt_vcpu *svc)
  * Otherwise, return value < 0
  */
 static s_time_t
-compare_vcpu_priority(const struct rt_vcpu *v1, const struct rt_vcpu *v2)
+compare_vcpu_priority(const struct rt_unit *v1, const struct rt_unit *v2)
 {
     int prio = v2->priority_level - v1->priority_level;
 
@@ -305,7 +305,7 @@ compare_vcpu_priority(const struct rt_vcpu *v1, const struct rt_vcpu *v2)
  * Debug related code, dump vcpu/cpu information
  */
 static void
-rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
+rt_dump_vcpu(const struct scheduler *ops, const struct rt_unit *svc)
 {
     cpumask_t *cpupool_mask, *mask;
 
@@ -352,13 +352,13 @@ static void
 rt_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
     unsigned long flags;
 
     spin_lock_irqsave(&prv->lock, flags);
     printk("CPU[%02d]\n", cpu);
     /* current VCPU (nothing to say if that's the idle vcpu). */
-    svc = rt_vcpu(curr_on_cpu(cpu));
+    svc = rt_unit(curr_on_cpu(cpu));
     if ( svc && !is_idle_vcpu(svc->vcpu) )
     {
         rt_dump_vcpu(ops, svc);
@@ -371,7 +371,7 @@ rt_dump(const struct scheduler *ops)
 {
     struct list_head *runq, *depletedq, *replq, *iter;
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
     struct rt_dom *sdom;
     unsigned long flags;
 
@@ -415,7 +415,7 @@ rt_dump(const struct scheduler *ops)
 
         for_each_vcpu ( sdom->dom, v )
         {
-            svc = rt_vcpu(v);
+            svc = rt_unit(v->sched_unit);
             rt_dump_vcpu(ops, svc);
         }
     }
@@ -429,7 +429,7 @@ rt_dump(const struct scheduler *ops)
  * it needs to be updated to the deadline of the current period
  */
 static void
-rt_update_deadline(s_time_t now, struct rt_vcpu *svc)
+rt_update_deadline(s_time_t now, struct rt_unit *svc)
 {
     ASSERT(now >= svc->cur_deadline);
     ASSERT(svc->period != 0);
@@ -500,8 +500,8 @@ deadline_queue_remove(struct list_head *queue, struct list_head *elem)
 }
 
 static inline bool
-deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
-                      struct rt_vcpu *svc, struct list_head *elem,
+deadline_queue_insert(struct rt_unit * (*qelem)(struct list_head *),
+                      struct rt_unit *svc, struct list_head *elem,
                       struct list_head *queue)
 {
     struct list_head *iter;
@@ -509,7 +509,7 @@ deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
 
     list_for_each ( iter, queue )
     {
-        struct rt_vcpu * iter_svc = (*qelem)(iter);
+        struct rt_unit * iter_svc = (*qelem)(iter);
         if ( compare_vcpu_priority(svc, iter_svc) > 0 )
             break;
         pos++;
@@ -523,14 +523,14 @@ deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
   deadline_queue_insert(&replq_elem, ##__VA_ARGS__)
 
 static inline void
-q_remove(struct rt_vcpu *svc)
+q_remove(struct rt_unit *svc)
 {
     ASSERT( vcpu_on_q(svc) );
     list_del_init(&svc->q_elem);
 }
 
 static inline void
-replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
+replq_remove(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct rt_private *prv = rt_priv(ops);
     struct list_head *replq = rt_replq(ops);
@@ -547,7 +547,7 @@ replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
          */
         if ( !list_empty(replq) )
         {
-            struct rt_vcpu *svc_next = replq_elem(replq->next);
+            struct rt_unit *svc_next = replq_elem(replq->next);
             set_timer(&prv->repl_timer, svc_next->cur_deadline);
         }
         else
@@ -561,7 +561,7 @@ replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
  * Insert svc without budget in DepletedQ unsorted;
  */
 static void
-runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
+runq_insert(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct rt_private *prv = rt_priv(ops);
     struct list_head *runq = rt_runq(ops);
@@ -579,7 +579,7 @@ runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
 }
 
 static void
-replq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
+replq_insert(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct list_head *replq = rt_replq(ops);
     struct rt_private *prv = rt_priv(ops);
@@ -601,10 +601,10 @@ replq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
  * changed.
  */
 static void
-replq_reinsert(const struct scheduler *ops, struct rt_vcpu *svc)
+replq_reinsert(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct list_head *replq = rt_replq(ops);
-    struct rt_vcpu *rearm_svc = svc;
+    struct rt_unit *rearm_svc = svc;
     bool_t rearm = 0;
 
     ASSERT( vcpu_on_replq(svc) );
@@ -735,7 +735,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                 void *pdata, void *vdata)
 {
     struct rt_private *prv = rt_priv(new_ops);
-    struct rt_vcpu *svc = vdata;
+    struct rt_unit *svc = vdata;
 
     ASSERT(!pdata && svc && is_idle_vcpu(svc->vcpu));
 
@@ -842,10 +842,10 @@ static void *
 rt_alloc_vdata(const struct scheduler *ops, struct sched_unit *unit, void *dd)
 {
     struct vcpu *vc = unit->vcpu;
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
 
     /* Allocate per-VCPU info */
-    svc = xzalloc(struct rt_vcpu);
+    svc = xzalloc(struct rt_unit);
     if ( svc == NULL )
         return NULL;
 
@@ -870,7 +870,7 @@ rt_alloc_vdata(const struct scheduler *ops, struct sched_unit *unit, void *dd)
 static void
 rt_free_vdata(const struct scheduler *ops, void *priv)
 {
-    struct rt_vcpu *svc = priv;
+    struct rt_unit *svc = priv;
 
     xfree(svc);
 }
@@ -886,7 +886,7 @@ static void
 rt_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct rt_vcpu *svc = rt_vcpu(vc);
+    struct rt_unit *svc = rt_unit(unit);
     s_time_t now;
     spinlock_t *lock;
 
@@ -915,13 +915,13 @@ rt_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 }
 
 /*
- * Remove rt_vcpu svc from the old scheduler in source cpupool.
+ * Remove rt_unit svc from the old scheduler in source cpupool.
  */
 static void
 rt_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct rt_vcpu * const svc = rt_vcpu(vc);
+    struct rt_unit * const svc = rt_unit(unit);
     struct rt_dom * const sdom = svc->sdom;
     spinlock_t *lock;
 
@@ -943,7 +943,7 @@ rt_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
  * Burn budget in nanosecond granularity
  */
 static void
-burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
+burn_budget(const struct scheduler *ops, struct rt_unit *svc, s_time_t now)
 {
     s_time_t delta;
 
@@ -1007,13 +1007,13 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
  * RunQ is sorted. Pick first one within cpumask. If no one, return NULL
  * lock is grabbed before calling this function
  */
-static struct rt_vcpu *
+static struct rt_unit *
 runq_pick(const struct scheduler *ops, const cpumask_t *mask)
 {
     struct list_head *runq = rt_runq(ops);
     struct list_head *iter;
-    struct rt_vcpu *svc = NULL;
-    struct rt_vcpu *iter_svc = NULL;
+    struct rt_unit *svc = NULL;
+    struct rt_unit *iter_svc = NULL;
     cpumask_t cpu_common;
     cpumask_t *online;
 
@@ -1064,8 +1064,8 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
 {
     const int cpu = smp_processor_id();
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *const scurr = rt_vcpu(current);
-    struct rt_vcpu *snext = NULL;
+    struct rt_unit *const scurr = rt_unit(current->sched_unit);
+    struct rt_unit *snext = NULL;
     struct task_slice ret = { .migrated = 0 };
 
     /* TRACE */
@@ -1091,13 +1091,13 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
     if ( tasklet_work_scheduled )
     {
         trace_var(TRC_RTDS_SCHED_TASKLET, 1, 0,  NULL);
-        snext = rt_vcpu(idle_vcpu[cpu]);
+        snext = rt_unit(idle_vcpu[cpu]->sched_unit);
     }
     else
     {
         snext = runq_pick(ops, cpumask_of(cpu));
         if ( snext == NULL )
-            snext = rt_vcpu(idle_vcpu[cpu]);
+            snext = rt_unit(idle_vcpu[cpu]->sched_unit);
 
         /* if scurr has higher priority and budget, still pick scurr */
         if ( !is_idle_vcpu(current) &&
@@ -1143,12 +1143,12 @@ static void
 rt_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct rt_vcpu * const svc = rt_vcpu(vc);
+    struct rt_unit * const svc = rt_unit(unit);
 
     BUG_ON( is_idle_vcpu(vc) );
     SCHED_STAT_CRANK(vcpu_sleep);
 
-    if ( curr_on_cpu(vc->processor) == vc )
+    if ( curr_on_cpu(vc->processor) == unit )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
     else if ( vcpu_on_q(svc) )
     {
@@ -1178,11 +1178,11 @@ rt_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
  * lock is grabbed before calling this function
  */
 static void
-runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
+runq_tickle(const struct scheduler *ops, struct rt_unit *new)
 {
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *latest_deadline_vcpu = NULL; /* lowest priority */
-    struct rt_vcpu *iter_svc;
+    struct rt_unit *latest_deadline_vcpu = NULL; /* lowest priority */
+    struct rt_unit *iter_svc;
     struct vcpu *iter_vc;
     int cpu = 0, cpu_to_tickle = 0;
     cpumask_t not_tickled;
@@ -1203,14 +1203,14 @@ runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
     cpu = cpumask_test_or_cycle(new->vcpu->processor, &not_tickled);
     while ( cpu!= nr_cpu_ids )
     {
-        iter_vc = curr_on_cpu(cpu);
+        iter_vc = curr_on_cpu(cpu)->vcpu;
         if ( is_idle_vcpu(iter_vc) )
         {
             SCHED_STAT_CRANK(tickled_idle_cpu);
             cpu_to_tickle = cpu;
             goto out;
         }
-        iter_svc = rt_vcpu(iter_vc);
+        iter_svc = rt_unit(iter_vc->sched_unit);
         if ( latest_deadline_vcpu == NULL ||
              compare_vcpu_priority(iter_svc, latest_deadline_vcpu) < 0 )
             latest_deadline_vcpu = iter_svc;
@@ -1259,13 +1259,13 @@ static void
 rt_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct rt_vcpu * const svc = rt_vcpu(vc);
+    struct rt_unit * const svc = rt_unit(unit);
     s_time_t now;
     bool_t missed;
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( unlikely(curr_on_cpu(vc->processor) == vc) )
+    if ( unlikely(curr_on_cpu(vc->processor) == unit) )
     {
         SCHED_STAT_CRANK(vcpu_wake_running);
         return;
@@ -1330,7 +1330,7 @@ static void
 rt_context_saved(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct rt_vcpu *svc = rt_vcpu(vc);
+    struct rt_unit *svc = rt_unit(unit);
     spinlock_t *lock = vcpu_schedule_lock_irq(vc);
 
     __clear_bit(__RTDS_scheduled, &svc->flags);
@@ -1361,7 +1361,7 @@ rt_dom_cntl(
     struct xen_domctl_scheduler_op *op)
 {
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
     struct vcpu *v;
     unsigned long flags;
     int rc = 0;
@@ -1385,7 +1385,7 @@ rt_dom_cntl(
         spin_lock_irqsave(&prv->lock, flags);
         for_each_vcpu ( d, v )
         {
-            svc = rt_vcpu(v);
+            svc = rt_unit(v->sched_unit);
             svc->period = MICROSECS(op->u.rtds.period); /* transfer to nanosec */
             svc->budget = MICROSECS(op->u.rtds.budget);
         }
@@ -1411,7 +1411,7 @@ rt_dom_cntl(
             if ( op->cmd == XEN_DOMCTL_SCHEDOP_getvcpuinfo )
             {
                 spin_lock_irqsave(&prv->lock, flags);
-                svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
+                svc = rt_unit(d->vcpu[local_sched.vcpuid]->sched_unit);
                 local_sched.u.rtds.budget = svc->budget / MICROSECS(1);
                 local_sched.u.rtds.period = svc->period / MICROSECS(1);
                 if ( has_extratime(svc) )
@@ -1439,7 +1439,7 @@ rt_dom_cntl(
                 }
 
                 spin_lock_irqsave(&prv->lock, flags);
-                svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
+                svc = rt_unit(d->vcpu[local_sched.vcpuid]->sched_unit);
                 svc->period = period;
                 svc->budget = budget;
                 if ( local_sched.u.rtds.flags & XEN_DOMCTL_SCHEDRT_extra )
@@ -1472,7 +1472,7 @@ static void repl_timer_handler(void *data){
     struct list_head *replq = rt_replq(ops);
     struct list_head *runq = rt_runq(ops);
     struct list_head *iter, *tmp;
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
     LIST_HEAD(tmp_replq);
 
     spin_lock_irq(&prv->lock);
@@ -1514,10 +1514,10 @@ static void repl_timer_handler(void *data){
     {
         svc = replq_elem(iter);
 
-        if ( curr_on_cpu(svc->vcpu->processor) == svc->vcpu &&
+        if ( curr_on_cpu(svc->vcpu->processor) == svc->vcpu->sched_unit &&
              !list_empty(runq) )
         {
-            struct rt_vcpu *next_on_runq = q_elem(runq->next);
+            struct rt_unit *next_on_runq = q_elem(runq->next);
 
             if ( compare_vcpu_priority(svc, next_on_runq) < 0 )
                 runq_tickle(ops, next_on_runq);
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 5dfdaaece2..c0cec0b6ca 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -334,7 +334,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     /* Idle VCPUs are scheduled immediately, so don't put them in runqueue. */
     if ( is_idle_domain(d) )
     {
-        per_cpu(schedule_data, v->processor).curr = v;
+        per_cpu(schedule_data, v->processor).curr = unit;
         v->is_running = 1;
     }
     else
@@ -1513,7 +1513,7 @@ static void schedule(void)
 
     next = next_slice.task;
 
-    sd->curr = next;
+    sd->curr = next->sched_unit;
 
     if ( next_slice.time >= 0 ) /* -ve means no limit */
         set_timer(&sd->s_timer, now + next_slice.time);
@@ -1636,7 +1636,6 @@ static int cpu_schedule_up(unsigned int cpu)
     per_cpu(scheduler, cpu) = &ops;
     spin_lock_init(&sd->_lock);
     sd->schedule_lock = &sd->_lock;
-    sd->curr = idle_vcpu[cpu];
     init_timer(&sd->s_timer, s_timer_fn, NULL, cpu);
     atomic_set(&sd->urgent_count, 0);
 
@@ -1670,6 +1669,8 @@ static int cpu_schedule_up(unsigned int cpu)
     if ( idle_vcpu[cpu] == NULL )
         return -ENOMEM;
 
+    sd->curr = idle_vcpu[cpu]->sched_unit;
+
     /*
      * We don't want to risk calling xfree() on an sd->sched_priv
      * (e.g., inside free_pdata, from cpu_schedule_down() called
@@ -1863,6 +1864,7 @@ void __init scheduler_init(void)
     idle_domain->max_vcpus = nr_cpu_ids;
     if ( vcpu_create(idle_domain, 0, 0) == NULL )
         BUG();
+    this_cpu(schedule_data).curr = idle_vcpu[0]->sched_unit;
     this_cpu(schedule_data).sched_priv = sched_alloc_pdata(&ops, 0);
     BUG_ON(IS_ERR(this_cpu(schedule_data).sched_priv));
     sched_init_pdata(&ops, this_cpu(schedule_data).sched_priv, 0);
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index dc149bc102..9b7855e775 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -36,7 +36,7 @@ extern int sched_ratelimit_us;
 struct schedule_data {
     spinlock_t         *schedule_lock,
                        _lock;
-    struct vcpu        *curr;           /* current task                    */
+    struct sched_unit  *curr;           /* current task                    */
     void               *sched_priv;
     struct timer        s_timer;        /* scheduling timer                */
     atomic_t            urgent_count;   /* how many urgent vcpus           */
-- 
2.16.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

WARNING: multiple messages have this Message-ID (diff)
From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>, Tim Deegan <tim@xen.org>,
	Stefano Stabellini <sstabellini@kernel.org>, Wei Liu <wl@xen.org>,
	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
	George Dunlap <george.dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	Robert VanVossen <robert.vanvossen@dornerworks.com>,
	Dario Faggioli <dfaggioli@suse.com>,
	Julien Grall <julien.grall@arm.com>,
	Josh Whitehead <josh.whitehead@dornerworks.com>,
	Meng Xu <mengxu@cis.upenn.edu>, Jan Beulich <jbeulich@suse.com>
Subject: [Xen-devel] [PATCH 10/60] xen/sched: switch schedule_data.curr to point at sched_unit
Date: Tue, 28 May 2019 12:32:23 +0200	[thread overview]
Message-ID: <20190528103313.1343-11-jgross@suse.com> (raw)
Message-ID: <20190528103223.RKrmn1XuYfRVKSboR4Xkvj7EElnEQWcs0iqKoAe4QUM@z> (raw)
In-Reply-To: <20190528103313.1343-1-jgross@suse.com>

In preparation for core scheduling, let the percpu pointer
schedule_data.curr point to a struct sched_unit instead of the related
vcpu. At the same time, rename the per-vcpu scheduler-specific structs
to per-unit ones.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
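As a quick, self-contained illustration (not part of the patch; the
structure layouts, the per_cpu() stand-in and the budget field below are
simplified placeholders, only the ->vcpu / ->sched_unit / ->priv
relationships mirror the real code), this sketch shows how call sites
change once schedule_data.curr carries a struct sched_unit pointer and
the per-vcpu scheduler structs become per-unit ones:

#include <stdbool.h>
#include <stdio.h>

struct sched_unit;

struct vcpu {
    int vcpu_id;
    struct sched_unit *sched_unit;   /* back-pointer to the vcpu's unit */
};

struct sched_unit {
    struct vcpu *vcpu;               /* (first) vcpu of the unit */
    void *priv;                      /* scheduler-private per-unit data */
};

/* Stand-in for the renamed per-unit structs (rt_unit, csched_unit, ...). */
struct rt_unit {
    int budget;
};

/* Accessors now take the unit, not the vcpu. */
static inline struct rt_unit *rt_unit(const struct sched_unit *unit)
{
    return unit->priv;
}

struct schedule_data {
    struct sched_unit *curr;         /* was: struct vcpu *curr */
};

static struct schedule_data schedule_data[4];    /* stand-in for per_cpu() */

#define curr_on_cpu(cpu) (schedule_data[cpu].curr)

/* Call sites such as the *_unit_sleep()/wake() handlers compare units now,
 * and dereference ->vcpu wherever the vcpu itself is needed. */
static bool unit_is_current(const struct vcpu *v, unsigned int cpu)
{
    return curr_on_cpu(cpu) == v->sched_unit;    /* was: == v */
}

int main(void)
{
    struct rt_unit priv = { .budget = 100 };
    struct sched_unit u = { .priv = &priv };
    struct vcpu v = { .vcpu_id = 0, .sched_unit = &u };

    u.vcpu = &v;
    schedule_data[0].curr = &u;

    printf("cpu0 runs vcpu%d (current: %s, budget %d)\n",
           curr_on_cpu(0)->vcpu->vcpu_id,        /* was: curr_on_cpu(0)->vcpu_id */
           unit_is_current(&v, 0) ? "yes" : "no",
           rt_unit(curr_on_cpu(0))->budget);
    return 0;
}

With curr being a unit, tests like "is this vcpu the one currently running
here" compare units directly, while code that needs the vcpu itself (e.g.
the is_idle_vcpu() checks in the hunks below) dereferences ->vcpu.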
 xen/common/sched_arinc653.c |   2 +-
 xen/common/sched_credit.c   | 101 +++++++++++++-------------
 xen/common/sched_credit2.c  | 168 ++++++++++++++++++++++----------------------
 xen/common/sched_null.c     |  44 ++++++------
 xen/common/sched_rt.c       | 118 +++++++++++++++----------------
 xen/common/schedule.c       |   8 ++-
 xen/include/xen/sched-if.h  |   2 +-
 7 files changed, 220 insertions(+), 223 deletions(-)

diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index 383df750b0..7f26e6a0b0 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -475,7 +475,7 @@ a653sched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
      * If the VCPU being put to sleep is the same one that is currently
      * running, raise a softirq to invoke the scheduler to switch domains.
      */
-    if ( per_cpu(schedule_data, vc->processor).curr == vc )
+    if ( per_cpu(schedule_data, vc->processor).curr == unit )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
 }
 
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 7f7596b3bf..bc87b813dc 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -83,7 +83,7 @@
     ((struct csched_private *)((_ops)->sched_data))
 #define CSCHED_PCPU(_c)     \
     ((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
-#define CSCHED_VCPU(_vcpu)  ((struct csched_vcpu *) (_vcpu)->sched_unit->priv)
+#define CSCHED_UNIT(unit)   ((struct csched_unit *) (unit)->priv)
 #define CSCHED_DOM(_dom)    ((struct csched_dom *) (_dom)->sched_priv)
 #define RUNQ(_cpu)          (&(CSCHED_PCPU(_cpu)->runq))
 
@@ -160,7 +160,7 @@ struct csched_pcpu {
 /*
  * Virtual CPU
  */
-struct csched_vcpu {
+struct csched_unit {
     struct list_head runq_elem;
     struct list_head active_vcpu_elem;
 
@@ -231,15 +231,15 @@ static void csched_tick(void *_cpu);
 static void csched_acct(void *dummy);
 
 static inline int
-__vcpu_on_runq(struct csched_vcpu *svc)
+__vcpu_on_runq(struct csched_unit *svc)
 {
     return !list_empty(&svc->runq_elem);
 }
 
-static inline struct csched_vcpu *
+static inline struct csched_unit *
 __runq_elem(struct list_head *elem)
 {
-    return list_entry(elem, struct csched_vcpu, runq_elem);
+    return list_entry(elem, struct csched_unit, runq_elem);
 }
 
 /* Is the first element of cpu's runq (if any) cpu's idle vcpu? */
@@ -271,7 +271,7 @@ dec_nr_runnable(unsigned int cpu)
 }
 
 static inline void
-__runq_insert(struct csched_vcpu *svc)
+__runq_insert(struct csched_unit *svc)
 {
     unsigned int cpu = svc->vcpu->processor;
     const struct list_head * const runq = RUNQ(cpu);
@@ -281,7 +281,7 @@ __runq_insert(struct csched_vcpu *svc)
 
     list_for_each( iter, runq )
     {
-        const struct csched_vcpu * const iter_svc = __runq_elem(iter);
+        const struct csched_unit * const iter_svc = __runq_elem(iter);
         if ( svc->pri > iter_svc->pri )
             break;
     }
@@ -302,34 +302,34 @@ __runq_insert(struct csched_vcpu *svc)
 }
 
 static inline void
-runq_insert(struct csched_vcpu *svc)
+runq_insert(struct csched_unit *svc)
 {
     __runq_insert(svc);
     inc_nr_runnable(svc->vcpu->processor);
 }
 
 static inline void
-__runq_remove(struct csched_vcpu *svc)
+__runq_remove(struct csched_unit *svc)
 {
     BUG_ON( !__vcpu_on_runq(svc) );
     list_del_init(&svc->runq_elem);
 }
 
 static inline void
-runq_remove(struct csched_vcpu *svc)
+runq_remove(struct csched_unit *svc)
 {
     dec_nr_runnable(svc->vcpu->processor);
     __runq_remove(svc);
 }
 
-static void burn_credits(struct csched_vcpu *svc, s_time_t now)
+static void burn_credits(struct csched_unit *svc, s_time_t now)
 {
     s_time_t delta;
     uint64_t val;
     unsigned int credits;
 
     /* Assert svc is current */
-    ASSERT( svc == CSCHED_VCPU(curr_on_cpu(svc->vcpu->processor)) );
+    ASSERT( svc == CSCHED_UNIT(curr_on_cpu(svc->vcpu->processor)) );
 
     if ( (delta = now - svc->start_time) <= 0 )
         return;
@@ -347,10 +347,10 @@ boolean_param("tickle_one_idle_cpu", opt_tickle_one_idle);
 
 DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
 
-static inline void __runq_tickle(struct csched_vcpu *new)
+static inline void __runq_tickle(struct csched_unit *new)
 {
     unsigned int cpu = new->vcpu->processor;
-    struct csched_vcpu * const cur = CSCHED_VCPU(curr_on_cpu(cpu));
+    struct csched_unit * const cur = CSCHED_UNIT(curr_on_cpu(cpu));
     struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
     cpumask_t mask, idle_mask, *online;
     int balance_step, idlers_empty;
@@ -605,7 +605,7 @@ init_pdata(struct csched_private *prv, struct csched_pcpu *spc, int cpu)
     spc->idle_bias = nr_cpu_ids - 1;
 
     /* Start off idling... */
-    BUG_ON(!is_idle_vcpu(curr_on_cpu(cpu)));
+    BUG_ON(!is_idle_vcpu(curr_on_cpu(cpu)->vcpu));
     cpumask_set_cpu(cpu, prv->idlers);
     spc->nr_runnable = 0;
 }
@@ -637,7 +637,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 {
     struct schedule_data *sd = &per_cpu(schedule_data, cpu);
     struct csched_private *prv = CSCHED_PRIV(new_ops);
-    struct csched_vcpu *svc = vdata;
+    struct csched_unit *svc = vdata;
 
     ASSERT(svc && is_idle_vcpu(svc->vcpu));
 
@@ -660,7 +660,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 static inline void
 __csched_vcpu_check(struct vcpu *vc)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(vc->sched_unit);
     struct csched_dom * const sdom = svc->sdom;
 
     BUG_ON( svc->vcpu != vc );
@@ -862,7 +862,7 @@ static struct sched_resource *
 csched_res_pick(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched_vcpu *svc = CSCHED_VCPU(vc);
+    struct csched_unit *svc = CSCHED_UNIT(unit);
 
     /*
      * We have been called by vcpu_migrate() (in schedule.c), as part
@@ -876,7 +876,7 @@ csched_res_pick(const struct scheduler *ops, struct sched_unit *unit)
 }
 
 static inline void
-__csched_vcpu_acct_start(struct csched_private *prv, struct csched_vcpu *svc)
+__csched_vcpu_acct_start(struct csched_private *prv, struct csched_unit *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
     unsigned long flags;
@@ -906,7 +906,7 @@ __csched_vcpu_acct_start(struct csched_private *prv, struct csched_vcpu *svc)
 
 static inline void
 __csched_vcpu_acct_stop_locked(struct csched_private *prv,
-    struct csched_vcpu *svc)
+    struct csched_unit *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
 
@@ -931,7 +931,7 @@ __csched_vcpu_acct_stop_locked(struct csched_private *prv,
 static void
 csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(current);
+    struct csched_unit * const svc = CSCHED_UNIT(current->sched_unit);
     const struct scheduler *ops = per_cpu(scheduler, cpu);
 
     ASSERT( current->processor == cpu );
@@ -1000,10 +1000,10 @@ csched_alloc_vdata(const struct scheduler *ops, struct sched_unit *unit,
                    void *dd)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched_vcpu *svc;
+    struct csched_unit *svc;
 
     /* Allocate per-VCPU info */
-    svc = xzalloc(struct csched_vcpu);
+    svc = xzalloc(struct csched_unit);
     if ( svc == NULL )
         return NULL;
 
@@ -1022,7 +1022,7 @@ static void
 csched_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched_vcpu *svc = unit->priv;
+    struct csched_unit *svc = unit->priv;
     spinlock_t *lock;
 
     BUG_ON( is_idle_vcpu(vc) );
@@ -1048,7 +1048,7 @@ csched_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 static void
 csched_free_vdata(const struct scheduler *ops, void *priv)
 {
-    struct csched_vcpu *svc = priv;
+    struct csched_unit *svc = priv;
 
     BUG_ON( !list_empty(&svc->runq_elem) );
 
@@ -1059,8 +1059,7 @@ static void
 csched_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched_private *prv = CSCHED_PRIV(ops);
-    struct vcpu *vc = unit->vcpu;
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(unit);
     struct csched_dom * const sdom = svc->sdom;
 
     SCHED_STAT_CRANK(vcpu_remove);
@@ -1087,14 +1086,14 @@ static void
 csched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(unit);
     unsigned int cpu = vc->processor;
 
     SCHED_STAT_CRANK(vcpu_sleep);
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( curr_on_cpu(cpu) == vc )
+    if ( curr_on_cpu(cpu) == unit )
     {
         /*
          * We are about to tickle cpu, so we should clear its bit in idlers.
@@ -1112,12 +1111,12 @@ static void
 csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(unit);
     bool_t migrating;
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( unlikely(curr_on_cpu(vc->processor) == vc) )
+    if ( unlikely(curr_on_cpu(vc->processor) == unit) )
     {
         SCHED_STAT_CRANK(vcpu_wake_running);
         return;
@@ -1173,8 +1172,7 @@ csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 static void
 csched_unit_yield(const struct scheduler *ops, struct sched_unit *unit)
 {
-    struct vcpu *vc = unit->vcpu;
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(unit);
 
     /* Let the scheduler know that this vcpu is trying to yield */
     set_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags);
@@ -1229,8 +1227,7 @@ static void
 csched_aff_cntl(const struct scheduler *ops, struct sched_unit *unit,
                 const cpumask_t *hard, const cpumask_t *soft)
 {
-    struct vcpu *v = unit->vcpu;
-    struct csched_vcpu *svc = CSCHED_VCPU(v);
+    struct csched_unit *svc = CSCHED_UNIT(unit);
 
     if ( !hard )
         return;
@@ -1333,7 +1330,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
 {
     struct csched_pcpu * const spc = CSCHED_PCPU(cpu);
     struct list_head *runq, *elem, *next, *last_under;
-    struct csched_vcpu *svc_elem;
+    struct csched_unit *svc_elem;
     spinlock_t *lock;
     unsigned long flags;
     int sort_epoch;
@@ -1379,7 +1376,7 @@ csched_acct(void* dummy)
     unsigned long flags;
     struct list_head *iter_vcpu, *next_vcpu;
     struct list_head *iter_sdom, *next_sdom;
-    struct csched_vcpu *svc;
+    struct csched_unit *svc;
     struct csched_dom *sdom;
     uint32_t credit_total;
     uint32_t weight_total;
@@ -1502,7 +1499,7 @@ csched_acct(void* dummy)
 
         list_for_each_safe( iter_vcpu, next_vcpu, &sdom->active_vcpu )
         {
-            svc = list_entry(iter_vcpu, struct csched_vcpu, active_vcpu_elem);
+            svc = list_entry(iter_vcpu, struct csched_unit, active_vcpu_elem);
             BUG_ON( sdom != svc->sdom );
 
             /* Increment credit */
@@ -1606,12 +1603,12 @@ csched_tick(void *_cpu)
     set_timer(&spc->ticker, NOW() + MICROSECS(prv->tick_period_us) );
 }
 
-static struct csched_vcpu *
+static struct csched_unit *
 csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
 {
     const struct csched_private * const prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
     const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
-    struct csched_vcpu *speer;
+    struct csched_unit *speer;
     struct list_head *iter;
     struct vcpu *vc;
 
@@ -1621,7 +1618,7 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
      * Don't steal from an idle CPU's runq because it's about to
      * pick up work from it itself.
      */
-    if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu))) )
+    if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu)->vcpu)) )
         goto out;
 
     list_for_each( iter, &peer_pcpu->runq )
@@ -1683,12 +1680,12 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
     return NULL;
 }
 
-static struct csched_vcpu *
+static struct csched_unit *
 csched_load_balance(struct csched_private *prv, int cpu,
-    struct csched_vcpu *snext, bool_t *stolen)
+    struct csched_unit *snext, bool_t *stolen)
 {
     struct cpupool *c = per_cpu(cpupool, cpu);
-    struct csched_vcpu *speer;
+    struct csched_unit *speer;
     cpumask_t workers;
     cpumask_t *online;
     int peer_cpu, first_cpu, peer_node, bstep;
@@ -1837,9 +1834,9 @@ csched_schedule(
 {
     const int cpu = smp_processor_id();
     struct list_head * const runq = RUNQ(cpu);
-    struct csched_vcpu * const scurr = CSCHED_VCPU(current);
+    struct csched_unit * const scurr = CSCHED_UNIT(current->sched_unit);
     struct csched_private *prv = CSCHED_PRIV(ops);
-    struct csched_vcpu *snext;
+    struct csched_unit *snext;
     struct task_slice ret;
     s_time_t runtime, tslice;
 
@@ -1955,7 +1952,7 @@ csched_schedule(
     if ( tasklet_work_scheduled )
     {
         TRACE_0D(TRC_CSCHED_SCHED_TASKLET);
-        snext = CSCHED_VCPU(idle_vcpu[cpu]);
+        snext = CSCHED_UNIT(idle_vcpu[cpu]->sched_unit);
         snext->pri = CSCHED_PRI_TS_BOOST;
     }
 
@@ -2007,7 +2004,7 @@ out:
 }
 
 static void
-csched_dump_vcpu(struct csched_vcpu *svc)
+csched_dump_vcpu(struct csched_unit *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
 
@@ -2043,7 +2040,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
     struct list_head *runq, *iter;
     struct csched_private *prv = CSCHED_PRIV(ops);
     struct csched_pcpu *spc;
-    struct csched_vcpu *svc;
+    struct csched_unit *svc;
     spinlock_t *lock;
     unsigned long flags;
     int loop;
@@ -2067,7 +2064,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
            nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
 
     /* current VCPU (nothing to say if that's the idle vcpu). */
-    svc = CSCHED_VCPU(curr_on_cpu(cpu));
+    svc = CSCHED_UNIT(curr_on_cpu(cpu));
     if ( svc && !is_idle_vcpu(svc->vcpu) )
     {
         printk("\trun: ");
@@ -2136,10 +2133,10 @@ csched_dump(const struct scheduler *ops)
 
         list_for_each( iter_svc, &sdom->active_vcpu )
         {
-            struct csched_vcpu *svc;
+            struct csched_unit *svc;
             spinlock_t *lock;
 
-            svc = list_entry(iter_svc, struct csched_vcpu, active_vcpu_elem);
+            svc = list_entry(iter_svc, struct csched_unit, active_vcpu_elem);
             lock = vcpu_schedule_lock(svc->vcpu);
 
             printk("\t%3d: ", ++loop);
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index b71802bba2..36201815e3 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -176,7 +176,7 @@
  *     load balancing;
  *  + serializes runqueue operations (removing and inserting vcpus);
  *  + protects runqueue-wide data in csched2_runqueue_data;
- *  + protects vcpu parameters in csched2_vcpu for the vcpu in the
+ *  + protects vcpu parameters in csched2_unit for the vcpu in the
  *    runqueue.
  *
  * - Private scheduler lock
@@ -511,7 +511,7 @@ struct csched2_pcpu {
 /*
  * Virtual CPU
  */
-struct csched2_vcpu {
+struct csched2_unit {
     struct csched2_dom *sdom;          /* Up-pointer to domain                */
     struct vcpu *vcpu;                 /* Up-pointer, to vcpu                 */
     struct csched2_runqueue_data *rqd; /* Up-pointer to the runqueue          */
@@ -570,9 +570,9 @@ static inline struct csched2_pcpu *csched2_pcpu(unsigned int cpu)
     return per_cpu(schedule_data, cpu).sched_priv;
 }
 
-static inline struct csched2_vcpu *csched2_vcpu(const struct vcpu *v)
+static inline struct csched2_unit *csched2_unit(const struct sched_unit *unit)
 {
-    return v->sched_unit->priv;
+    return unit->priv;
 }
 
 static inline struct csched2_dom *csched2_dom(const struct domain *d)
@@ -594,7 +594,7 @@ static inline struct csched2_runqueue_data *c2rqd(const struct scheduler *ops,
 }
 
 /* Does the domain of this vCPU have a cap? */
-static inline bool has_cap(const struct csched2_vcpu *svc)
+static inline bool has_cap(const struct csched2_unit *svc)
 {
     return svc->budget != STIME_MAX;
 }
@@ -688,7 +688,7 @@ void smt_idle_mask_clear(unsigned int cpu, cpumask_t *mask)
  * Of course, 1, 2 and 3 makes sense only if svc has a soft affinity. Also
  * note that at least 5 is guaranteed to _always_ return at least one pcpu.
  */
-static int get_fallback_cpu(struct csched2_vcpu *svc)
+static int get_fallback_cpu(struct csched2_unit *svc)
 {
     struct vcpu *v = svc->vcpu;
     unsigned int bs;
@@ -773,7 +773,7 @@ static int get_fallback_cpu(struct csched2_vcpu *svc)
  * FIXME: Do pre-calculated division?
  */
 static void t2c_update(struct csched2_runqueue_data *rqd, s_time_t time,
-                          struct csched2_vcpu *svc)
+                          struct csched2_unit *svc)
 {
     uint64_t val = time * rqd->max_weight + svc->residual;
 
@@ -781,7 +781,7 @@ static void t2c_update(struct csched2_runqueue_data *rqd, s_time_t time,
     svc->credit -= val;
 }
 
-static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct csched2_vcpu *svc)
+static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct csched2_unit *svc)
 {
     return credit * svc->weight / rqd->max_weight;
 }
@@ -790,14 +790,14 @@ static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct c
  * Runqueue related code.
  */
 
-static inline int vcpu_on_runq(struct csched2_vcpu *svc)
+static inline int vcpu_on_runq(struct csched2_unit *svc)
 {
     return !list_empty(&svc->runq_elem);
 }
 
-static inline struct csched2_vcpu * runq_elem(struct list_head *elem)
+static inline struct csched2_unit * runq_elem(struct list_head *elem)
 {
-    return list_entry(elem, struct csched2_vcpu, runq_elem);
+    return list_entry(elem, struct csched2_unit, runq_elem);
 }
 
 static void activate_runqueue(struct csched2_private *prv, int rqi)
@@ -915,7 +915,7 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
 
         list_for_each( iter, &rqd->svc )
         {
-            struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, rqd_elem);
+            struct csched2_unit * svc = list_entry(iter, struct csched2_unit, rqd_elem);
 
             if ( svc->weight > max_weight )
                 max_weight = svc->weight;
@@ -940,7 +940,7 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
 
 /* Add and remove from runqueue assignment (not active run queue) */
 static void
-_runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
+_runq_assign(struct csched2_unit *svc, struct csched2_runqueue_data *rqd)
 {
 
     svc->rqd = rqd;
@@ -970,7 +970,7 @@ _runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
 static void
 runq_assign(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched2_vcpu *svc = vc->sched_unit->priv;
+    struct csched2_unit *svc = vc->sched_unit->priv;
 
     ASSERT(svc->rqd == NULL);
 
@@ -978,7 +978,7 @@ runq_assign(const struct scheduler *ops, struct vcpu *vc)
 }
 
 static void
-_runq_deassign(struct csched2_vcpu *svc)
+_runq_deassign(struct csched2_unit *svc)
 {
     struct csched2_runqueue_data *rqd = svc->rqd;
 
@@ -997,7 +997,7 @@ _runq_deassign(struct csched2_vcpu *svc)
 static void
 runq_deassign(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched2_vcpu *svc = vc->sched_unit->priv;
+    struct csched2_unit *svc = vc->sched_unit->priv;
 
     ASSERT(svc->rqd == c2rqd(ops, vc->processor));
 
@@ -1199,7 +1199,7 @@ update_runq_load(const struct scheduler *ops,
 
 static void
 update_svc_load(const struct scheduler *ops,
-                struct csched2_vcpu *svc, int change, s_time_t now)
+                struct csched2_unit *svc, int change, s_time_t now)
 {
     struct csched2_private *prv = csched2_priv(ops);
     s_time_t delta, vcpu_load;
@@ -1259,7 +1259,7 @@ update_svc_load(const struct scheduler *ops,
 static void
 update_load(const struct scheduler *ops,
             struct csched2_runqueue_data *rqd,
-            struct csched2_vcpu *svc, int change, s_time_t now)
+            struct csched2_unit *svc, int change, s_time_t now)
 {
     trace_var(TRC_CSCHED2_UPDATE_LOAD, 1, 0,  NULL);
 
@@ -1269,7 +1269,7 @@ update_load(const struct scheduler *ops,
 }
 
 static void
-runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
+runq_insert(const struct scheduler *ops, struct csched2_unit *svc)
 {
     struct list_head *iter;
     unsigned int cpu = svc->vcpu->processor;
@@ -1288,7 +1288,7 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
 
     list_for_each( iter, runq )
     {
-        struct csched2_vcpu * iter_svc = runq_elem(iter);
+        struct csched2_unit * iter_svc = runq_elem(iter);
 
         if ( svc->credit > iter_svc->credit )
             break;
@@ -1312,13 +1312,13 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
     }
 }
 
-static inline void runq_remove(struct csched2_vcpu *svc)
+static inline void runq_remove(struct csched2_unit *svc)
 {
     ASSERT(vcpu_on_runq(svc));
     list_del_init(&svc->runq_elem);
 }
 
-void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_vcpu *, s_time_t);
+void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_unit *, s_time_t);
 
 static inline void
 tickle_cpu(unsigned int cpu, struct csched2_runqueue_data *rqd)
@@ -1334,7 +1334,7 @@ tickle_cpu(unsigned int cpu, struct csched2_runqueue_data *rqd)
  * whether or not it already run for more than the ratelimit, to which we
  * apply some tolerance).
  */
-static inline bool is_preemptable(const struct csched2_vcpu *svc,
+static inline bool is_preemptable(const struct csched2_unit *svc,
                                     s_time_t now, s_time_t ratelimit)
 {
     if ( ratelimit <= CSCHED2_RATELIMIT_TICKLE_TOLERANCE )
@@ -1360,10 +1360,10 @@ static inline bool is_preemptable(const struct csched2_vcpu *svc,
  * Within the same class, the highest difference of credit.
  */
 static s_time_t tickle_score(const struct scheduler *ops, s_time_t now,
-                             struct csched2_vcpu *new, unsigned int cpu)
+                             struct csched2_unit *new, unsigned int cpu)
 {
     struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
-    struct csched2_vcpu * cur = csched2_vcpu(curr_on_cpu(cpu));
+    struct csched2_unit * cur = csched2_unit(curr_on_cpu(cpu));
     struct csched2_private *prv = csched2_priv(ops);
     s_time_t score;
 
@@ -1432,7 +1432,7 @@ static s_time_t tickle_score(const struct scheduler *ops, s_time_t now,
  * pick up some work, so it would be wrong to consider it idle.
  */
 static void
-runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
+runq_tickle(const struct scheduler *ops, struct csched2_unit *new, s_time_t now)
 {
     int i, ipid = -1;
     s_time_t max = 0;
@@ -1587,7 +1587,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
         return;
     }
 
-    ASSERT(!is_idle_vcpu(curr_on_cpu(ipid)));
+    ASSERT(!is_idle_vcpu(curr_on_cpu(ipid)->vcpu));
     SCHED_STAT_CRANK(tickled_busy_cpu);
  tickle:
     BUG_ON(ipid == -1);
@@ -1614,7 +1614,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
  * Credit-related code
  */
 static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
-                         struct csched2_vcpu *snext)
+                         struct csched2_unit *snext)
 {
     struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
     struct list_head *iter;
@@ -1644,10 +1644,10 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
     list_for_each( iter, &rqd->svc )
     {
         unsigned int svc_cpu;
-        struct csched2_vcpu * svc;
+        struct csched2_unit * svc;
         int start_credit;
 
-        svc = list_entry(iter, struct csched2_vcpu, rqd_elem);
+        svc = list_entry(iter, struct csched2_unit, rqd_elem);
         svc_cpu = svc->vcpu->processor;
 
         ASSERT(!is_idle_vcpu(svc->vcpu));
@@ -1657,7 +1657,7 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
          * If svc is running, it is our responsibility to make sure, here,
          * that the credit it has spent so far get accounted.
          */
-        if ( svc->vcpu == curr_on_cpu(svc_cpu) )
+        if ( svc->vcpu == curr_on_cpu(svc_cpu)->vcpu )
         {
             burn_credits(rqd, svc, now);
             /*
@@ -1709,11 +1709,11 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
 }
 
 void burn_credits(struct csched2_runqueue_data *rqd,
-                  struct csched2_vcpu *svc, s_time_t now)
+                  struct csched2_unit *svc, s_time_t now)
 {
     s_time_t delta;
 
-    ASSERT(svc == csched2_vcpu(curr_on_cpu(svc->vcpu->processor)));
+    ASSERT(svc == csched2_unit(curr_on_cpu(svc->vcpu->processor)));
 
     if ( unlikely(is_idle_vcpu(svc->vcpu)) )
     {
@@ -1763,7 +1763,7 @@ void burn_credits(struct csched2_runqueue_data *rqd,
  * Budget-related code.
  */
 
-static void park_vcpu(struct csched2_vcpu *svc)
+static void park_vcpu(struct csched2_unit *svc)
 {
     struct vcpu *v = svc->vcpu;
 
@@ -1792,7 +1792,7 @@ static void park_vcpu(struct csched2_vcpu *svc)
     list_add(&svc->parked_elem, &svc->sdom->parked_vcpus);
 }
 
-static bool vcpu_grab_budget(struct csched2_vcpu *svc)
+static bool vcpu_grab_budget(struct csched2_unit *svc)
 {
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
@@ -1839,7 +1839,7 @@ static bool vcpu_grab_budget(struct csched2_vcpu *svc)
 }
 
 static void
-vcpu_return_budget(struct csched2_vcpu *svc, struct list_head *parked)
+vcpu_return_budget(struct csched2_unit *svc, struct list_head *parked)
 {
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
@@ -1882,7 +1882,7 @@ vcpu_return_budget(struct csched2_vcpu *svc, struct list_head *parked)
 static void
 unpark_parked_vcpus(const struct scheduler *ops, struct list_head *vcpus)
 {
-    struct csched2_vcpu *svc, *tmp;
+    struct csched2_unit *svc, *tmp;
     spinlock_t *lock;
 
     list_for_each_entry_safe(svc, tmp, vcpus, parked_elem)
@@ -2004,7 +2004,7 @@ static void replenish_domain_budget(void* data)
 static inline void
 csched2_vcpu_check(struct vcpu *vc)
 {
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(vc->sched_unit);
     struct csched2_dom * const sdom = svc->sdom;
 
     BUG_ON( svc->vcpu != vc );
@@ -2030,10 +2030,10 @@ csched2_alloc_vdata(const struct scheduler *ops, struct sched_unit *unit,
                     void *dd)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched2_vcpu *svc;
+    struct csched2_unit *svc;
 
     /* Allocate per-VCPU info */
-    svc = xzalloc(struct csched2_vcpu);
+    svc = xzalloc(struct csched2_unit);
     if ( svc == NULL )
         return NULL;
 
@@ -2074,12 +2074,12 @@ static void
 csched2_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
 
     ASSERT(!is_idle_vcpu(vc));
     SCHED_STAT_CRANK(vcpu_sleep);
 
-    if ( curr_on_cpu(vc->processor) == vc )
+    if ( curr_on_cpu(vc->processor) == unit )
     {
         tickle_cpu(vc->processor, svc->rqd);
     }
@@ -2097,7 +2097,7 @@ static void
 csched2_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
     unsigned int cpu = vc->processor;
     s_time_t now;
 
@@ -2105,7 +2105,7 @@ csched2_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 
     ASSERT(!is_idle_vcpu(vc));
 
-    if ( unlikely(curr_on_cpu(cpu) == vc) )
+    if ( unlikely(curr_on_cpu(cpu) == unit) )
     {
         SCHED_STAT_CRANK(vcpu_wake_running);
         goto out;
@@ -2152,8 +2152,7 @@ out:
 static void
 csched2_unit_yield(const struct scheduler *ops, struct sched_unit *unit)
 {
-    struct vcpu *v = unit->vcpu;
-    struct csched2_vcpu * const svc = csched2_vcpu(v);
+    struct csched2_unit * const svc = csched2_unit(unit);
 
     __set_bit(__CSFLAG_vcpu_yield, &svc->flags);
 }
@@ -2162,7 +2161,7 @@ static void
 csched2_context_saved(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
     spinlock_t *lock = vcpu_schedule_lock_irq(vc);
     s_time_t now = NOW();
     LIST_HEAD(were_parked);
@@ -2208,7 +2207,7 @@ csched2_res_pick(const struct scheduler *ops, struct sched_unit *unit)
     struct vcpu *vc = unit->vcpu;
     int i, min_rqi = -1, min_s_rqi = -1;
     unsigned int new_cpu, cpu = vc->processor;
-    struct csched2_vcpu *svc = csched2_vcpu(vc);
+    struct csched2_unit *svc = csched2_unit(unit);
     s_time_t min_avgload = MAX_LOAD, min_s_avgload = MAX_LOAD;
     bool has_soft;
 
@@ -2430,15 +2429,15 @@ csched2_res_pick(const struct scheduler *ops, struct sched_unit *unit)
 typedef struct {
     /* NB: Modified by consider() */
     s_time_t load_delta;
-    struct csched2_vcpu * best_push_svc, *best_pull_svc;
+    struct csched2_unit * best_push_svc, *best_pull_svc;
     /* NB: Read by consider() */
     struct csched2_runqueue_data *lrqd;
     struct csched2_runqueue_data *orqd;                  
 } balance_state_t;
 
 static void consider(balance_state_t *st, 
-                     struct csched2_vcpu *push_svc,
-                     struct csched2_vcpu *pull_svc)
+                     struct csched2_unit *push_svc,
+                     struct csched2_unit *pull_svc)
 {
     s_time_t l_load, o_load, delta;
 
@@ -2471,8 +2470,8 @@ static void consider(balance_state_t *st,
 
 
 static void migrate(const struct scheduler *ops,
-                    struct csched2_vcpu *svc, 
-                    struct csched2_runqueue_data *trqd, 
+                    struct csched2_unit *svc,
+                    struct csched2_runqueue_data *trqd,
                     s_time_t now)
 {
     int cpu = svc->vcpu->processor;
@@ -2541,7 +2540,7 @@ static void migrate(const struct scheduler *ops,
  *  - svc is not already flagged to migrate,
  *  - if svc is allowed to run on at least one of the pcpus of rqd.
  */
-static bool vcpu_is_migrateable(struct csched2_vcpu *svc,
+static bool vcpu_is_migrateable(struct csched2_unit *svc,
                                   struct csched2_runqueue_data *rqd)
 {
     struct vcpu *v = svc->vcpu;
@@ -2691,7 +2690,7 @@ retry:
     /* Reuse load delta (as we're trying to minimize it) */
     list_for_each( push_iter, &st.lrqd->svc )
     {
-        struct csched2_vcpu * push_svc = list_entry(push_iter, struct csched2_vcpu, rqd_elem);
+        struct csched2_unit * push_svc = list_entry(push_iter, struct csched2_unit, rqd_elem);
 
         update_svc_load(ops, push_svc, 0, now);
 
@@ -2700,7 +2699,7 @@ retry:
 
         list_for_each( pull_iter, &st.orqd->svc )
         {
-            struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem);
+            struct csched2_unit * pull_svc = list_entry(pull_iter, struct csched2_unit, rqd_elem);
             
             if ( !inner_load_updated )
                 update_svc_load(ops, pull_svc, 0, now);
@@ -2719,7 +2718,7 @@ retry:
 
     list_for_each( pull_iter, &st.orqd->svc )
     {
-        struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem);
+        struct csched2_unit * pull_svc = list_entry(pull_iter, struct csched2_unit, rqd_elem);
         
         if ( !vcpu_is_migrateable(pull_svc, st.lrqd) )
             continue;
@@ -2746,7 +2745,7 @@ csched2_unit_migrate(
 {
     struct vcpu *vc = unit->vcpu;
     struct domain *d = vc->domain;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
     struct csched2_runqueue_data *trqd;
     s_time_t now = NOW();
 
@@ -2847,7 +2846,7 @@ csched2_dom_cntl(
             /* Update weights for vcpus, and max_weight for runqueues on which they reside */
             for_each_vcpu ( d, v )
             {
-                struct csched2_vcpu *svc = csched2_vcpu(v);
+                struct csched2_unit *svc = csched2_unit(v->sched_unit);
                 spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
 
                 ASSERT(svc->rqd == c2rqd(ops, svc->vcpu->processor));
@@ -2861,7 +2860,7 @@ csched2_dom_cntl(
         /* Cap */
         if ( op->u.credit2.cap != 0 )
         {
-            struct csched2_vcpu *svc;
+            struct csched2_unit *svc;
             spinlock_t *lock;
 
             /* Cap is only valid if it's below 100 * nr_of_vCPUS */
@@ -2885,7 +2884,7 @@ csched2_dom_cntl(
              */
             for_each_vcpu ( d, v )
             {
-                svc = csched2_vcpu(v);
+                svc = csched2_unit(v->sched_unit);
                 lock = vcpu_schedule_lock(svc->vcpu);
                 /*
                  * Too small quotas would in theory cause a lot of overhead,
@@ -2928,14 +2927,14 @@ csched2_dom_cntl(
                  */
                 for_each_vcpu ( d, v )
                 {
-                    svc = csched2_vcpu(v);
+                    svc = csched2_unit(v->sched_unit);
                     lock = vcpu_schedule_lock(svc->vcpu);
                     if ( v->is_running )
                     {
                         unsigned int cpu = v->processor;
                         struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
 
-                        ASSERT(curr_on_cpu(cpu) == v);
+                        ASSERT(curr_on_cpu(cpu)->vcpu == v);
 
                         /*
                          * We are triggering a reschedule on the vCPU's
@@ -2975,7 +2974,7 @@ csched2_dom_cntl(
             /* Disable budget accounting for all the vCPUs. */
             for_each_vcpu ( d, v )
             {
-                struct csched2_vcpu *svc = csched2_vcpu(v);
+                struct csched2_unit *svc = csched2_unit(v->sched_unit);
                 spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
 
                 svc->budget = STIME_MAX;
@@ -3012,8 +3011,7 @@ static void
 csched2_aff_cntl(const struct scheduler *ops, struct sched_unit *unit,
                  const cpumask_t *hard, const cpumask_t *soft)
 {
-    struct vcpu *v = unit->vcpu;
-    struct csched2_vcpu *svc = csched2_vcpu(v);
+    struct csched2_unit *svc = csched2_unit(unit);
 
     if ( !hard )
         return;
@@ -3113,7 +3111,7 @@ static void
 csched2_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched2_vcpu *svc = unit->priv;
+    struct csched2_unit *svc = unit->priv;
     struct csched2_dom * const sdom = svc->sdom;
     spinlock_t *lock;
 
@@ -3145,7 +3143,7 @@ csched2_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 static void
 csched2_free_vdata(const struct scheduler *ops, void *priv)
 {
-    struct csched2_vcpu *svc = priv;
+    struct csched2_unit *svc = priv;
 
     xfree(svc);
 }
@@ -3154,7 +3152,7 @@ static void
 csched2_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
     spinlock_t *lock;
 
     ASSERT(!is_idle_vcpu(vc));
@@ -3175,7 +3173,7 @@ csched2_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 /* How long should we let this vcpu run for? */
 static s_time_t
 csched2_runtime(const struct scheduler *ops, int cpu,
-                struct csched2_vcpu *snext, s_time_t now)
+                struct csched2_unit *snext, s_time_t now)
 {
     s_time_t time, min_time;
     int rt_credit; /* Proposed runtime measured in credits */
@@ -3220,7 +3218,7 @@ csched2_runtime(const struct scheduler *ops, int cpu,
      */
     if ( ! list_empty(runq) )
     {
-        struct csched2_vcpu *swait = runq_elem(runq->next);
+        struct csched2_unit *swait = runq_elem(runq->next);
 
         if ( ! is_idle_vcpu(swait->vcpu)
              && swait->credit > 0 )
@@ -3271,14 +3269,14 @@ csched2_runtime(const struct scheduler *ops, int cpu,
 /*
  * Find a candidate.
  */
-static struct csched2_vcpu *
+static struct csched2_unit *
 runq_candidate(struct csched2_runqueue_data *rqd,
-               struct csched2_vcpu *scurr,
+               struct csched2_unit *scurr,
                int cpu, s_time_t now,
                unsigned int *skipped)
 {
     struct list_head *iter, *temp;
-    struct csched2_vcpu *snext = NULL;
+    struct csched2_unit *snext = NULL;
     struct csched2_private *prv = csched2_priv(per_cpu(scheduler, cpu));
     bool yield = false, soft_aff_preempt = false;
 
@@ -3359,12 +3357,12 @@ runq_candidate(struct csched2_runqueue_data *rqd,
     if ( vcpu_runnable(scurr->vcpu) && !soft_aff_preempt )
         snext = scurr;
     else
-        snext = csched2_vcpu(idle_vcpu[cpu]);
+        snext = csched2_unit(idle_vcpu[cpu]->sched_unit);
 
  check_runq:
     list_for_each_safe( iter, temp, &rqd->runq )
     {
-        struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, runq_elem);
+        struct csched2_unit * svc = list_entry(iter, struct csched2_unit, runq_elem);
 
         if ( unlikely(tb_init_done) )
         {
@@ -3463,8 +3461,8 @@ csched2_schedule(
 {
     const int cpu = smp_processor_id();
     struct csched2_runqueue_data *rqd;
-    struct csched2_vcpu * const scurr = csched2_vcpu(current);
-    struct csched2_vcpu *snext = NULL;
+    struct csched2_unit * const scurr = csched2_unit(current->sched_unit);
+    struct csched2_unit *snext = NULL;
     unsigned int skipped_vcpus = 0;
     struct task_slice ret;
     bool tickled;
@@ -3540,7 +3538,7 @@ csched2_schedule(
     {
         __clear_bit(__CSFLAG_vcpu_yield, &scurr->flags);
         trace_var(TRC_CSCHED2_SCHED_TASKLET, 1, 0, NULL);
-        snext = csched2_vcpu(idle_vcpu[cpu]);
+        snext = csched2_unit(idle_vcpu[cpu]->sched_unit);
     }
     else
         snext = runq_candidate(rqd, scurr, cpu, now, &skipped_vcpus);
@@ -3643,7 +3641,7 @@ csched2_schedule(
 }
 
 static void
-csched2_dump_vcpu(struct csched2_private *prv, struct csched2_vcpu *svc)
+csched2_dump_vcpu(struct csched2_private *prv, struct csched2_unit *svc)
 {
     printk("[%i.%i] flags=%x cpu=%i",
             svc->vcpu->domain->domain_id,
@@ -3667,7 +3665,7 @@ static inline void
 dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct csched2_private *prv = csched2_priv(ops);
-    struct csched2_vcpu *svc;
+    struct csched2_unit *svc;
 
     printk("CPU[%02d] runq=%d, sibling=%*pb, core=%*pb\n",
            cpu, c2r(cpu),
@@ -3675,7 +3673,7 @@ dump_pcpu(const struct scheduler *ops, int cpu)
            nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
 
     /* current VCPU (nothing to say if that's the idle vcpu) */
-    svc = csched2_vcpu(curr_on_cpu(cpu));
+    svc = csched2_unit(curr_on_cpu(cpu));
     if ( svc && !is_idle_vcpu(svc->vcpu) )
     {
         printk("\trun: ");
@@ -3748,7 +3746,7 @@ csched2_dump(const struct scheduler *ops)
 
         for_each_vcpu( sdom->dom, v )
         {
-            struct csched2_vcpu * const svc = csched2_vcpu(v);
+            struct csched2_unit * const svc = csched2_unit(v->sched_unit);
             spinlock_t *lock;
 
             lock = vcpu_schedule_lock(svc->vcpu);
@@ -3777,7 +3775,7 @@ csched2_dump(const struct scheduler *ops)
         printk("RUNQ:\n");
         list_for_each( iter, runq )
         {
-            struct csched2_vcpu *svc = runq_elem(iter);
+            struct csched2_unit *svc = runq_elem(iter);
 
             if ( svc )
             {
@@ -3878,7 +3876,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                      void *pdata, void *vdata)
 {
     struct csched2_private *prv = csched2_priv(new_ops);
-    struct csched2_vcpu *svc = vdata;
+    struct csched2_unit *svc = vdata;
     unsigned rqi;
 
     ASSERT(pdata && svc && is_idle_vcpu(svc->vcpu));
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index 1ea8643a9c..defcdfcf1e 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -94,7 +94,7 @@ DEFINE_PER_CPU(struct null_pcpu, npc);
 /*
  * Virtual CPU
  */
-struct null_vcpu {
+struct null_unit {
     struct list_head waitq_elem;
     struct vcpu *vcpu;
 };
@@ -115,9 +115,9 @@ static inline struct null_private *null_priv(const struct scheduler *ops)
     return ops->sched_data;
 }
 
-static inline struct null_vcpu *null_vcpu(const struct vcpu *v)
+static inline struct null_unit *null_unit(const struct sched_unit *unit)
 {
-    return v->sched_unit->priv;
+    return unit->priv;
 }
 
 static inline bool vcpu_check_affinity(struct vcpu *v, unsigned int cpu,
@@ -197,9 +197,9 @@ static void *null_alloc_vdata(const struct scheduler *ops,
                               struct sched_unit *unit, void *dd)
 {
     struct vcpu *v = unit->vcpu;
-    struct null_vcpu *nvc;
+    struct null_unit *nvc;
 
-    nvc = xzalloc(struct null_vcpu);
+    nvc = xzalloc(struct null_unit);
     if ( nvc == NULL )
         return NULL;
 
@@ -213,7 +213,7 @@ static void *null_alloc_vdata(const struct scheduler *ops,
 
 static void null_free_vdata(const struct scheduler *ops, void *priv)
 {
-    struct null_vcpu *nvc = priv;
+    struct null_unit *nvc = priv;
 
     xfree(nvc);
 }
@@ -391,7 +391,7 @@ static spinlock_t *null_switch_sched(struct scheduler *new_ops,
 {
     struct schedule_data *sd = &per_cpu(schedule_data, cpu);
     struct null_private *prv = null_priv(new_ops);
-    struct null_vcpu *nvc = vdata;
+    struct null_unit *nvc = vdata;
 
     ASSERT(nvc && is_idle_vcpu(nvc->vcpu));
 
@@ -414,7 +414,7 @@ static void null_unit_insert(const struct scheduler *ops,
 {
     struct vcpu *v = unit->vcpu;
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *nvc = null_vcpu(v);
+    struct null_unit *nvc = null_unit(unit);
     unsigned int cpu;
     spinlock_t *lock;
 
@@ -471,9 +471,9 @@ static void _vcpu_remove(struct null_private *prv, struct vcpu *v)
 {
     unsigned int bs;
     unsigned int cpu = v->processor;
-    struct null_vcpu *wvc;
+    struct null_unit *wvc;
 
-    ASSERT(list_empty(&null_vcpu(v)->waitq_elem));
+    ASSERT(list_empty(&null_unit(v->sched_unit)->waitq_elem));
 
     vcpu_deassign(prv, v, cpu);
 
@@ -509,7 +509,7 @@ static void null_unit_remove(const struct scheduler *ops,
 {
     struct vcpu *v = unit->vcpu;
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *nvc = null_vcpu(v);
+    struct null_unit *nvc = null_unit(unit);
     spinlock_t *lock;
 
     ASSERT(!is_idle_vcpu(v));
@@ -544,13 +544,13 @@ static void null_unit_wake(const struct scheduler *ops,
 
     ASSERT(!is_idle_vcpu(v));
 
-    if ( unlikely(curr_on_cpu(v->processor) == v) )
+    if ( unlikely(curr_on_cpu(v->processor) == unit) )
     {
         SCHED_STAT_CRANK(vcpu_wake_running);
         return;
     }
 
-    if ( unlikely(!list_empty(&null_vcpu(v)->waitq_elem)) )
+    if ( unlikely(!list_empty(&null_unit(unit)->waitq_elem)) )
     {
         /* Not exactly "on runq", but close enough for reusing the counter */
         SCHED_STAT_CRANK(vcpu_wake_onrunq);
@@ -574,7 +574,7 @@ static void null_unit_sleep(const struct scheduler *ops,
     ASSERT(!is_idle_vcpu(v));
 
     /* If v is not assigned to a pCPU, or is not running, no need to bother */
-    if ( curr_on_cpu(v->processor) == v )
+    if ( curr_on_cpu(v->processor) == unit )
         cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
 
     SCHED_STAT_CRANK(vcpu_sleep);
@@ -592,7 +592,7 @@ static void null_unit_migrate(const struct scheduler *ops,
 {
     struct vcpu *v = unit->vcpu;
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *nvc = null_vcpu(v);
+    struct null_unit *nvc = null_unit(unit);
 
     ASSERT(!is_idle_vcpu(v));
 
@@ -677,7 +677,7 @@ static void null_unit_migrate(const struct scheduler *ops,
 #ifndef NDEBUG
 static inline void null_vcpu_check(struct vcpu *v)
 {
-    struct null_vcpu * const nvc = null_vcpu(v);
+    struct null_unit * const nvc = null_unit(v->sched_unit);
     struct null_dom * const ndom = v->domain->sched_priv;
 
     BUG_ON(nvc->vcpu != v);
@@ -707,7 +707,7 @@ static struct task_slice null_schedule(const struct scheduler *ops,
     unsigned int bs;
     const unsigned int cpu = smp_processor_id();
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *wvc;
+    struct null_unit *wvc;
     struct task_slice ret;
 
     SCHED_STAT_CRANK(schedule);
@@ -790,7 +790,7 @@ static struct task_slice null_schedule(const struct scheduler *ops,
     return ret;
 }
 
-static inline void dump_vcpu(struct null_private *prv, struct null_vcpu *nvc)
+static inline void dump_vcpu(struct null_private *prv, struct null_unit *nvc)
 {
     printk("[%i.%i] pcpu=%d", nvc->vcpu->domain->domain_id,
             nvc->vcpu->vcpu_id, list_empty(&nvc->waitq_elem) ?
@@ -800,7 +800,7 @@ static inline void dump_vcpu(struct null_private *prv, struct null_vcpu *nvc)
 static void null_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *nvc;
+    struct null_unit *nvc;
     spinlock_t *lock;
     unsigned long flags;
 
@@ -815,7 +815,7 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
     printk("\n");
 
     /* current VCPU (nothing to say if that's the idle vcpu) */
-    nvc = null_vcpu(curr_on_cpu(cpu));
+    nvc = null_unit(curr_on_cpu(cpu));
     if ( nvc && !is_idle_vcpu(nvc->vcpu) )
     {
         printk("\trun: ");
@@ -849,7 +849,7 @@ static void null_dump(const struct scheduler *ops)
         printk("\tDomain: %d\n", ndom->dom->domain_id);
         for_each_vcpu( ndom->dom, v )
         {
-            struct null_vcpu * const nvc = null_vcpu(v);
+            struct null_unit * const nvc = null_unit(v->sched_unit);
             spinlock_t *lock;
 
             lock = vcpu_schedule_lock(nvc->vcpu);
@@ -867,7 +867,7 @@ static void null_dump(const struct scheduler *ops)
     spin_lock(&prv->waitq_lock);
     list_for_each( iter, &prv->waitq )
     {
-        struct null_vcpu *nvc = list_entry(iter, struct null_vcpu, waitq_elem);
+        struct null_unit *nvc = list_entry(iter, struct null_unit, waitq_elem);
 
         if ( loop++ != 0 )
             printk(", ");
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index b80bc02357..8caec5c5dc 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -195,7 +195,7 @@ struct rt_private {
 /*
  * Virtual CPU
  */
-struct rt_vcpu {
+struct rt_unit {
     struct list_head q_elem;     /* on the runq/depletedq list */
     struct list_head replq_elem; /* on the replenishment events list */
 
@@ -233,9 +233,9 @@ static inline struct rt_private *rt_priv(const struct scheduler *ops)
     return ops->sched_data;
 }
 
-static inline struct rt_vcpu *rt_vcpu(const struct vcpu *vcpu)
+static inline struct rt_unit *rt_unit(const struct sched_unit *unit)
 {
-    return vcpu->sched_unit->priv;
+    return unit->priv;
 }
 
 static inline struct list_head *rt_runq(const struct scheduler *ops)
@@ -253,7 +253,7 @@ static inline struct list_head *rt_replq(const struct scheduler *ops)
     return &rt_priv(ops)->replq;
 }
 
-static inline bool has_extratime(const struct rt_vcpu *svc)
+static inline bool has_extratime(const struct rt_unit *svc)
 {
     return svc->flags & RTDS_extratime;
 }
@@ -263,25 +263,25 @@ static inline bool has_extratime(const struct rt_vcpu *svc)
  * and the replenishment events queue.
  */
 static int
-vcpu_on_q(const struct rt_vcpu *svc)
+vcpu_on_q(const struct rt_unit *svc)
 {
    return !list_empty(&svc->q_elem);
 }
 
-static struct rt_vcpu *
+static struct rt_unit *
 q_elem(struct list_head *elem)
 {
-    return list_entry(elem, struct rt_vcpu, q_elem);
+    return list_entry(elem, struct rt_unit, q_elem);
 }
 
-static struct rt_vcpu *
+static struct rt_unit *
 replq_elem(struct list_head *elem)
 {
-    return list_entry(elem, struct rt_vcpu, replq_elem);
+    return list_entry(elem, struct rt_unit, replq_elem);
 }
 
 static int
-vcpu_on_replq(const struct rt_vcpu *svc)
+vcpu_on_replq(const struct rt_unit *svc)
 {
     return !list_empty(&svc->replq_elem);
 }
@@ -291,7 +291,7 @@ vcpu_on_replq(const struct rt_vcpu *svc)
  * Otherwise, return value < 0
  */
 static s_time_t
-compare_vcpu_priority(const struct rt_vcpu *v1, const struct rt_vcpu *v2)
+compare_vcpu_priority(const struct rt_unit *v1, const struct rt_unit *v2)
 {
     int prio = v2->priority_level - v1->priority_level;
 
@@ -305,7 +305,7 @@ compare_vcpu_priority(const struct rt_vcpu *v1, const struct rt_vcpu *v2)
  * Debug related code, dump vcpu/cpu information
  */
 static void
-rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
+rt_dump_vcpu(const struct scheduler *ops, const struct rt_unit *svc)
 {
     cpumask_t *cpupool_mask, *mask;
 
@@ -352,13 +352,13 @@ static void
 rt_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
     unsigned long flags;
 
     spin_lock_irqsave(&prv->lock, flags);
     printk("CPU[%02d]\n", cpu);
     /* current VCPU (nothing to say if that's the idle vcpu). */
-    svc = rt_vcpu(curr_on_cpu(cpu));
+    svc = rt_unit(curr_on_cpu(cpu));
     if ( svc && !is_idle_vcpu(svc->vcpu) )
     {
         rt_dump_vcpu(ops, svc);
@@ -371,7 +371,7 @@ rt_dump(const struct scheduler *ops)
 {
     struct list_head *runq, *depletedq, *replq, *iter;
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
     struct rt_dom *sdom;
     unsigned long flags;
 
@@ -415,7 +415,7 @@ rt_dump(const struct scheduler *ops)
 
         for_each_vcpu ( sdom->dom, v )
         {
-            svc = rt_vcpu(v);
+            svc = rt_unit(v->sched_unit);
             rt_dump_vcpu(ops, svc);
         }
     }
@@ -429,7 +429,7 @@ rt_dump(const struct scheduler *ops)
  * it needs to be updated to the deadline of the current period
  */
 static void
-rt_update_deadline(s_time_t now, struct rt_vcpu *svc)
+rt_update_deadline(s_time_t now, struct rt_unit *svc)
 {
     ASSERT(now >= svc->cur_deadline);
     ASSERT(svc->period != 0);
@@ -500,8 +500,8 @@ deadline_queue_remove(struct list_head *queue, struct list_head *elem)
 }
 
 static inline bool
-deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
-                      struct rt_vcpu *svc, struct list_head *elem,
+deadline_queue_insert(struct rt_unit * (*qelem)(struct list_head *),
+                      struct rt_unit *svc, struct list_head *elem,
                       struct list_head *queue)
 {
     struct list_head *iter;
@@ -509,7 +509,7 @@ deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
 
     list_for_each ( iter, queue )
     {
-        struct rt_vcpu * iter_svc = (*qelem)(iter);
+        struct rt_unit * iter_svc = (*qelem)(iter);
         if ( compare_vcpu_priority(svc, iter_svc) > 0 )
             break;
         pos++;
@@ -523,14 +523,14 @@ deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
   deadline_queue_insert(&replq_elem, ##__VA_ARGS__)
 
 static inline void
-q_remove(struct rt_vcpu *svc)
+q_remove(struct rt_unit *svc)
 {
     ASSERT( vcpu_on_q(svc) );
     list_del_init(&svc->q_elem);
 }
 
 static inline void
-replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
+replq_remove(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct rt_private *prv = rt_priv(ops);
     struct list_head *replq = rt_replq(ops);
@@ -547,7 +547,7 @@ replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
          */
         if ( !list_empty(replq) )
         {
-            struct rt_vcpu *svc_next = replq_elem(replq->next);
+            struct rt_unit *svc_next = replq_elem(replq->next);
             set_timer(&prv->repl_timer, svc_next->cur_deadline);
         }
         else
@@ -561,7 +561,7 @@ replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
  * Insert svc without budget in DepletedQ unsorted;
  */
 static void
-runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
+runq_insert(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct rt_private *prv = rt_priv(ops);
     struct list_head *runq = rt_runq(ops);
@@ -579,7 +579,7 @@ runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
 }
 
 static void
-replq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
+replq_insert(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct list_head *replq = rt_replq(ops);
     struct rt_private *prv = rt_priv(ops);
@@ -601,10 +601,10 @@ replq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
  * changed.
  */
 static void
-replq_reinsert(const struct scheduler *ops, struct rt_vcpu *svc)
+replq_reinsert(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct list_head *replq = rt_replq(ops);
-    struct rt_vcpu *rearm_svc = svc;
+    struct rt_unit *rearm_svc = svc;
     bool_t rearm = 0;
 
     ASSERT( vcpu_on_replq(svc) );
@@ -735,7 +735,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                 void *pdata, void *vdata)
 {
     struct rt_private *prv = rt_priv(new_ops);
-    struct rt_vcpu *svc = vdata;
+    struct rt_unit *svc = vdata;
 
     ASSERT(!pdata && svc && is_idle_vcpu(svc->vcpu));
 
@@ -842,10 +842,10 @@ static void *
 rt_alloc_vdata(const struct scheduler *ops, struct sched_unit *unit, void *dd)
 {
     struct vcpu *vc = unit->vcpu;
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
 
     /* Allocate per-VCPU info */
-    svc = xzalloc(struct rt_vcpu);
+    svc = xzalloc(struct rt_unit);
     if ( svc == NULL )
         return NULL;
 
@@ -870,7 +870,7 @@ rt_alloc_vdata(const struct scheduler *ops, struct sched_unit *unit, void *dd)
 static void
 rt_free_vdata(const struct scheduler *ops, void *priv)
 {
-    struct rt_vcpu *svc = priv;
+    struct rt_unit *svc = priv;
 
     xfree(svc);
 }
@@ -886,7 +886,7 @@ static void
 rt_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct rt_vcpu *svc = rt_vcpu(vc);
+    struct rt_unit *svc = rt_unit(unit);
     s_time_t now;
     spinlock_t *lock;
 
@@ -915,13 +915,13 @@ rt_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 }
 
 /*
- * Remove rt_vcpu svc from the old scheduler in source cpupool.
+ * Remove rt_unit svc from the old scheduler in source cpupool.
  */
 static void
 rt_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct rt_vcpu * const svc = rt_vcpu(vc);
+    struct rt_unit * const svc = rt_unit(unit);
     struct rt_dom * const sdom = svc->sdom;
     spinlock_t *lock;
 
@@ -943,7 +943,7 @@ rt_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
  * Burn budget in nanosecond granularity
  */
 static void
-burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
+burn_budget(const struct scheduler *ops, struct rt_unit *svc, s_time_t now)
 {
     s_time_t delta;
 
@@ -1007,13 +1007,13 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
  * RunQ is sorted. Pick first one within cpumask. If no one, return NULL
  * lock is grabbed before calling this function
  */
-static struct rt_vcpu *
+static struct rt_unit *
 runq_pick(const struct scheduler *ops, const cpumask_t *mask)
 {
     struct list_head *runq = rt_runq(ops);
     struct list_head *iter;
-    struct rt_vcpu *svc = NULL;
-    struct rt_vcpu *iter_svc = NULL;
+    struct rt_unit *svc = NULL;
+    struct rt_unit *iter_svc = NULL;
     cpumask_t cpu_common;
     cpumask_t *online;
 
@@ -1064,8 +1064,8 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
 {
     const int cpu = smp_processor_id();
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *const scurr = rt_vcpu(current);
-    struct rt_vcpu *snext = NULL;
+    struct rt_unit *const scurr = rt_unit(current->sched_unit);
+    struct rt_unit *snext = NULL;
     struct task_slice ret = { .migrated = 0 };
 
     /* TRACE */
@@ -1091,13 +1091,13 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
     if ( tasklet_work_scheduled )
     {
         trace_var(TRC_RTDS_SCHED_TASKLET, 1, 0,  NULL);
-        snext = rt_vcpu(idle_vcpu[cpu]);
+        snext = rt_unit(idle_vcpu[cpu]->sched_unit);
     }
     else
     {
         snext = runq_pick(ops, cpumask_of(cpu));
         if ( snext == NULL )
-            snext = rt_vcpu(idle_vcpu[cpu]);
+            snext = rt_unit(idle_vcpu[cpu]->sched_unit);
 
         /* if scurr has higher priority and budget, still pick scurr */
         if ( !is_idle_vcpu(current) &&
@@ -1143,12 +1143,12 @@ static void
 rt_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct rt_vcpu * const svc = rt_vcpu(vc);
+    struct rt_unit * const svc = rt_unit(unit);
 
     BUG_ON( is_idle_vcpu(vc) );
     SCHED_STAT_CRANK(vcpu_sleep);
 
-    if ( curr_on_cpu(vc->processor) == vc )
+    if ( curr_on_cpu(vc->processor) == unit )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
     else if ( vcpu_on_q(svc) )
     {
@@ -1178,11 +1178,11 @@ rt_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
  * lock is grabbed before calling this function
  */
 static void
-runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
+runq_tickle(const struct scheduler *ops, struct rt_unit *new)
 {
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *latest_deadline_vcpu = NULL; /* lowest priority */
-    struct rt_vcpu *iter_svc;
+    struct rt_unit *latest_deadline_vcpu = NULL; /* lowest priority */
+    struct rt_unit *iter_svc;
     struct vcpu *iter_vc;
     int cpu = 0, cpu_to_tickle = 0;
     cpumask_t not_tickled;
@@ -1203,14 +1203,14 @@ runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
     cpu = cpumask_test_or_cycle(new->vcpu->processor, &not_tickled);
     while ( cpu!= nr_cpu_ids )
     {
-        iter_vc = curr_on_cpu(cpu);
+        iter_vc = curr_on_cpu(cpu)->vcpu;
         if ( is_idle_vcpu(iter_vc) )
         {
             SCHED_STAT_CRANK(tickled_idle_cpu);
             cpu_to_tickle = cpu;
             goto out;
         }
-        iter_svc = rt_vcpu(iter_vc);
+        iter_svc = rt_unit(iter_vc->sched_unit);
         if ( latest_deadline_vcpu == NULL ||
              compare_vcpu_priority(iter_svc, latest_deadline_vcpu) < 0 )
             latest_deadline_vcpu = iter_svc;
@@ -1259,13 +1259,13 @@ static void
 rt_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct rt_vcpu * const svc = rt_vcpu(vc);
+    struct rt_unit * const svc = rt_unit(unit);
     s_time_t now;
     bool_t missed;
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( unlikely(curr_on_cpu(vc->processor) == vc) )
+    if ( unlikely(curr_on_cpu(vc->processor) == unit) )
     {
         SCHED_STAT_CRANK(vcpu_wake_running);
         return;
@@ -1330,7 +1330,7 @@ static void
 rt_context_saved(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu;
-    struct rt_vcpu *svc = rt_vcpu(vc);
+    struct rt_unit *svc = rt_unit(unit);
     spinlock_t *lock = vcpu_schedule_lock_irq(vc);
 
     __clear_bit(__RTDS_scheduled, &svc->flags);
@@ -1361,7 +1361,7 @@ rt_dom_cntl(
     struct xen_domctl_scheduler_op *op)
 {
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
     struct vcpu *v;
     unsigned long flags;
     int rc = 0;
@@ -1385,7 +1385,7 @@ rt_dom_cntl(
         spin_lock_irqsave(&prv->lock, flags);
         for_each_vcpu ( d, v )
         {
-            svc = rt_vcpu(v);
+            svc = rt_unit(v->sched_unit);
             svc->period = MICROSECS(op->u.rtds.period); /* transfer to nanosec */
             svc->budget = MICROSECS(op->u.rtds.budget);
         }
@@ -1411,7 +1411,7 @@ rt_dom_cntl(
             if ( op->cmd == XEN_DOMCTL_SCHEDOP_getvcpuinfo )
             {
                 spin_lock_irqsave(&prv->lock, flags);
-                svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
+                svc = rt_unit(d->vcpu[local_sched.vcpuid]->sched_unit);
                 local_sched.u.rtds.budget = svc->budget / MICROSECS(1);
                 local_sched.u.rtds.period = svc->period / MICROSECS(1);
                 if ( has_extratime(svc) )
@@ -1439,7 +1439,7 @@ rt_dom_cntl(
                 }
 
                 spin_lock_irqsave(&prv->lock, flags);
-                svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
+                svc = rt_unit(d->vcpu[local_sched.vcpuid]->sched_unit);
                 svc->period = period;
                 svc->budget = budget;
                 if ( local_sched.u.rtds.flags & XEN_DOMCTL_SCHEDRT_extra )
@@ -1472,7 +1472,7 @@ static void repl_timer_handler(void *data){
     struct list_head *replq = rt_replq(ops);
     struct list_head *runq = rt_runq(ops);
     struct list_head *iter, *tmp;
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
     LIST_HEAD(tmp_replq);
 
     spin_lock_irq(&prv->lock);
@@ -1514,10 +1514,10 @@ static void repl_timer_handler(void *data){
     {
         svc = replq_elem(iter);
 
-        if ( curr_on_cpu(svc->vcpu->processor) == svc->vcpu &&
+        if ( curr_on_cpu(svc->vcpu->processor) == svc->vcpu->sched_unit &&
              !list_empty(runq) )
         {
-            struct rt_vcpu *next_on_runq = q_elem(runq->next);
+            struct rt_unit *next_on_runq = q_elem(runq->next);
 
             if ( compare_vcpu_priority(svc, next_on_runq) < 0 )
                 runq_tickle(ops, next_on_runq);
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 5dfdaaece2..c0cec0b6ca 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -334,7 +334,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     /* Idle VCPUs are scheduled immediately, so don't put them in runqueue. */
     if ( is_idle_domain(d) )
     {
-        per_cpu(schedule_data, v->processor).curr = v;
+        per_cpu(schedule_data, v->processor).curr = unit;
         v->is_running = 1;
     }
     else
@@ -1513,7 +1513,7 @@ static void schedule(void)
 
     next = next_slice.task;
 
-    sd->curr = next;
+    sd->curr = next->sched_unit;
 
     if ( next_slice.time >= 0 ) /* -ve means no limit */
         set_timer(&sd->s_timer, now + next_slice.time);
@@ -1636,7 +1636,6 @@ static int cpu_schedule_up(unsigned int cpu)
     per_cpu(scheduler, cpu) = &ops;
     spin_lock_init(&sd->_lock);
     sd->schedule_lock = &sd->_lock;
-    sd->curr = idle_vcpu[cpu];
     init_timer(&sd->s_timer, s_timer_fn, NULL, cpu);
     atomic_set(&sd->urgent_count, 0);
 
@@ -1670,6 +1669,8 @@ static int cpu_schedule_up(unsigned int cpu)
     if ( idle_vcpu[cpu] == NULL )
         return -ENOMEM;
 
+    sd->curr = idle_vcpu[cpu]->sched_unit;
+
     /*
      * We don't want to risk calling xfree() on an sd->sched_priv
      * (e.g., inside free_pdata, from cpu_schedule_down() called
@@ -1863,6 +1864,7 @@ void __init scheduler_init(void)
     idle_domain->max_vcpus = nr_cpu_ids;
     if ( vcpu_create(idle_domain, 0, 0) == NULL )
         BUG();
+    this_cpu(schedule_data).curr = idle_vcpu[0]->sched_unit;
     this_cpu(schedule_data).sched_priv = sched_alloc_pdata(&ops, 0);
     BUG_ON(IS_ERR(this_cpu(schedule_data).sched_priv));
     sched_init_pdata(&ops, this_cpu(schedule_data).sched_priv, 0);
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index dc149bc102..9b7855e775 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -36,7 +36,7 @@ extern int sched_ratelimit_us;
 struct schedule_data {
     spinlock_t         *schedule_lock,
                        _lock;
-    struct vcpu        *curr;           /* current task                    */
+    struct sched_unit  *curr;           /* current task                    */
     void               *sched_priv;
     struct timer        s_timer;        /* scheduling timer                */
     atomic_t            urgent_count;   /* how many urgent vcpus           */
-- 
2.16.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
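
For readers skimming the diff above: the net effect on callers is that comparisons against the per-CPU "current" pointer now use the vcpu's sched_unit rather than the vcpu itself. Below is a minimal sketch of that pattern; it is not part of the patch, the helper name is made up for illustration, and it assumes the curr_on_cpu() wrapper and the vcpu->sched_unit / unit->vcpu links introduced earlier in the series.

    /* Illustration only -- not from the patch. */
    static inline bool is_curr_on_its_cpu(const struct vcpu *v)
    {
        /*
         * schedule_data.curr (and thus curr_on_cpu()) now holds a
         * struct sched_unit *, so callers compare against the vcpu's
         * unit and dereference unit->vcpu when the vcpu is needed.
         */
        return curr_on_cpu(v->processor) == v->sched_unit;
    }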


Thread overview: 202+ messages
2019-05-28 10:32 [PATCH 00/60] xen: add core scheduling support Juergen Gross
2019-05-28 10:32 ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 01/60] xen/sched: only allow schedulers with all mandatory functions available Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-06-11 16:03   ` Dario Faggioli
2019-05-28 10:32 ` [PATCH 02/60] xen/sched: add inline wrappers for calling per-scheduler functions Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-06-11 16:21   ` Dario Faggioli
2019-05-28 10:32 ` [PATCH 03/60] xen/sched: let sched_switch_sched() return new lock address Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-06-11 16:55   ` Dario Faggioli
2019-06-12  7:40     ` Jan Beulich
2019-06-12  8:06       ` Juergen Gross
2019-06-12  9:44         ` Dario Faggioli
2019-06-12  8:05   ` Andrew Cooper
2019-06-12  8:19     ` Juergen Gross
2019-06-12  9:32       ` Jan Beulich
     [not found]       ` <5D00C6960200007800237622@suse.com>
2019-06-12  9:56         ` Dario Faggioli
2019-06-12 10:14           ` Jan Beulich
2019-06-12 11:27             ` Andrew Cooper
2019-06-12 13:32               ` Dario Faggioli
2019-05-28 10:32 ` [PATCH 04/60] xen/sched: use new sched_unit instead of vcpu in scheduler interfaces Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-07-18 17:44   ` Dario Faggioli
2019-07-19  4:49     ` Juergen Gross
2019-07-19 17:01       ` Dario Faggioli
2019-05-28 10:32 ` [PATCH 05/60] xen/sched: alloc struct sched_unit for each vcpu Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-07-18 17:57   ` Dario Faggioli
2019-07-19  4:56     ` Juergen Gross
2019-07-19 17:04       ` Dario Faggioli
2019-05-28 10:32 ` [PATCH 06/60] xen/sched: move per-vcpu scheduler private data pointer to sched_unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-07-18 22:52   ` Dario Faggioli
2019-07-19  5:03     ` Juergen Gross
2019-07-19 17:10       ` Dario Faggioli
2019-05-28 10:32 ` [PATCH 07/60] xen/sched: build a linked list of struct sched_unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-07-19  0:01   ` Dario Faggioli
2019-07-19  5:07     ` Juergen Gross
2019-07-19 17:16       ` Dario Faggioli
2019-05-28 10:32 ` [PATCH 08/60] xen/sched: introduce struct sched_resource Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-07-19 17:43   ` Dario Faggioli
2019-07-19 17:49   ` Dario Faggioli
2019-05-28 10:32 ` [PATCH 09/60] xen/sched: let pick_cpu return a scheduler resource Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-07-19 18:06   ` Dario Faggioli
2019-05-28 10:32 ` Juergen Gross [this message]
2019-05-28 10:32   ` [Xen-devel] [PATCH 10/60] xen/sched: switch schedule_data.curr to point at sched_unit Juergen Gross
2019-07-29 22:08   ` Dario Faggioli
2019-05-28 10:32 ` [PATCH 11/60] xen/sched: move per cpu scheduler private data into struct sched_resource Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 11:32   ` Jan Beulich
2019-05-28 11:32     ` [Xen-devel] " Jan Beulich
2019-07-29 22:22   ` Dario Faggioli
2019-05-28 10:32 ` [PATCH 12/60] xen/sched: switch vcpu_schedule_lock to unit_schedule_lock Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 13/60] xen/sched: move some per-vcpu items to struct sched_unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-06-13  7:18   ` Andrii Anisov
2019-06-13  7:29     ` Juergen Gross
2019-06-13  7:34       ` Andrii Anisov
2019-06-13  8:39         ` Juergen Gross
2019-06-13  8:49           ` Andrii Anisov
2019-05-28 10:32 ` [PATCH 14/60] xen/sched: add scheduler helpers hiding vcpu Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 15/60] xen/sched: add domain pointer to struct sched_unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 16/60] xen/sched: add id " Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 17/60] xen/sched: rename scheduler related perf counters Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 18/60] xen/sched: switch struct task_slice from vcpu to sched_unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 19/60] xen/sched: add is_running indicator to struct sched_unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 20/60] xen/sched: make null scheduler vcpu agnostic Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 21/60] xen/sched: make rt " Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 22/60] xen/sched: make credit " Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 23/60] xen/sched: make credit2 " Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 24/60] xen/sched: make arinc653 " Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 25/60] xen: add sched_unit_pause_nosync() and sched_unit_unpause() Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 26/60] xen: let vcpu_create() select processor Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 27/60] xen/sched: use sched_resource cpu instead smp_processor_id in schedulers Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 28/60] xen/sched: switch schedule() from vcpus to sched_units Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 29/60] xen/sched: switch sched_move_irqs() to take sched_unit as parameter Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 30/60] xen: switch from for_each_vcpu() to for_each_sched_unit() Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 31/60] xen/sched: add runstate counters to struct sched_unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 32/60] xen/sched: rework and rename vcpu_force_reschedule() Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 33/60] xen/sched: Change vcpu_migrate_*() to operate on schedule unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 34/60] xen/sched: move struct task_slice into struct sched_unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 35/60] xen/sched: add code to sync scheduling of all vcpus of a sched unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 36/60] xen/sched: introduce unit_runnable_state() Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 37/60] xen/sched: add support for multiple vcpus per sched unit where missing Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 38/60] x86: make loading of GDT at context switch more modular Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-07-02 15:38   ` Andrew Cooper
2019-05-28 10:32 ` [PATCH 39/60] x86: optimize loading of GDT at context switch Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-07-02 16:09   ` Andrew Cooper
2019-07-03  6:30     ` Juergen Gross
2019-07-03 12:21       ` Andrew Cooper
2019-07-05  7:30         ` Juergen Gross
2019-05-28 10:32 ` [PATCH 40/60] xen/sched: modify cpupool_domain_cpumask() to be an unit mask Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 41/60] xen/sched: support allocating multiple vcpus into one sched unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 42/60] xen/sched: add a scheduler_percpu_init() function Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 43/60] xen/sched: add a percpu resource index Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 44/60] xen/sched: add fall back to idle vcpu when scheduling unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 45/60] xen/sched: make vcpu_wake() and vcpu_sleep() core scheduling aware Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 46/60] xen/sched: carve out freeing sched_unit memory into dedicated function Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 47/60] xen/sched: move per-cpu variable scheduler to struct sched_resource Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 48/60] xen/sched: move per-cpu variable cpupool " Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 49/60] xen/sched: reject switching smt on/off with core scheduling active Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 11:44   ` Jan Beulich
2019-05-28 11:44     ` [Xen-devel] " Jan Beulich
2019-05-28 11:52     ` Juergen Gross
2019-05-28 11:52       ` [Xen-devel] " Juergen Gross
2019-06-12  9:36       ` Dario Faggioli
2019-05-28 10:33 ` [PATCH 50/60] xen/sched: prepare per-cpupool scheduling granularity Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 51/60] xen/sched: use one schedule lock for all free cpus Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 52/60] xen/sched: populate cpupool0 only after all cpus are up Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 53/60] xen/sched: remove cpu from pool0 before removing it Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 54/60] xen/sched: add minimalistic idle scheduler for free cpus Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 11:47   ` Jan Beulich
2019-05-28 11:47     ` [Xen-devel] " Jan Beulich
2019-05-28 11:58     ` Juergen Gross
2019-05-28 11:58       ` [Xen-devel] " Juergen Gross
2019-05-31 14:15       ` Dario Faggioli
2019-05-31 14:15         ` [Xen-devel] " Dario Faggioli
2019-05-31 15:52   ` Dario Faggioli
2019-05-31 15:52     ` [Xen-devel] " Dario Faggioli
2019-05-31 16:44     ` Juergen Gross
2019-05-31 16:44       ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 55/60] xen/sched: split schedule_cpu_switch() Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 56/60] xen/sched: protect scheduling resource via rcu Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 57/60] xen/sched: support multiple cpus per scheduling resource Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 58/60] xen/sched: support differing granularity in schedule_cpu_[add/rm]() Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 59/60] xen/sched: support core scheduling for moving cpus to/from cpupools Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 60/60] xen/sched: add scheduling granularity enum Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 11:51   ` Jan Beulich
2019-05-28 11:51     ` [Xen-devel] " Jan Beulich
2019-05-28 12:02     ` Juergen Gross
2019-05-28 12:02       ` [Xen-devel] " Juergen Gross
2019-07-19 18:31   ` Dario Faggioli
2019-07-05 13:17 ` [Xen-devel] [PATCH 00/60] xen: add core scheduling support Sergey Dyasli
2019-07-05 13:22   ` Juergen Gross
2019-07-05 13:56   ` Dario Faggioli
2019-07-15 14:08     ` Sergey Dyasli
2019-07-18 14:48       ` Juergen Gross
2019-07-18 15:14         ` Sergey Dyasli
2019-07-18 16:04           ` Dario Faggioli
2019-07-19  5:41           ` Juergen Gross
2019-07-19 11:24             ` Juergen Gross
2019-07-19 13:57           ` Juergen Gross
2019-07-22 14:22             ` Sergey Dyasli
2019-07-24  9:13               ` Juergen Gross
2019-07-24 14:54                 ` Sergey Dyasli
2019-07-24 15:11                   ` Juergen Gross
2019-07-16 15:45   ` Sergey Dyasli
2019-07-19 13:35     ` Juergen Gross
2019-07-25 16:01   ` Sergey Dyasli
2019-07-11 13:40 ` Dario Faggioli
     [not found] <20190528103313.1343-1-jgross@suse.com>
     [not found] ` <20190528103313.1343-4-jgross@suse.com>
