From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: "Juergen Gross" <jgross@suse.com>, "Tim Deegan" <tim@xen.org>,
	"Stefano Stabellini" <sstabellini@kernel.org>,
	"Wei Liu" <wei.liu2@citrix.com>,
	"Konrad Rzeszutek Wilk" <konrad.wilk@oracle.com>,
	"George Dunlap" <george.dunlap@eu.citrix.com>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Ian Jackson" <ian.jackson@eu.citrix.com>,
	"Robert VanVossen" <robert.vanvossen@dornerworks.com>,
	"Dario Faggioli" <dfaggioli@suse.com>,
	"Julien Grall" <julien.grall@arm.com>,
	"Josh Whitehead" <josh.whitehead@dornerworks.com>,
	"Meng Xu" <mengxu@cis.upenn.edu>,
	"Jan Beulich" <jbeulich@suse.com>,
	"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [PATCH RFC V2 09/45] xen/sched: move per cpu scheduler private data into struct sched_resource
Date: Mon,  6 May 2019 08:56:08 +0200	[thread overview]
Message-ID: <20190506065644.7415-10-jgross@suse.com> (raw)
In-Reply-To: <20190506065644.7415-1-jgross@suse.com>

Move the per-cpu scheduler private data out of struct schedule_data and into
struct sched_resource, and drop struct schedule_data, leaving sched_res as the
single per-cpu anchor for scheduler state.

This prepares for supporting larger scheduling granularities, e.g. core
scheduling.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
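Note (illustration only, not part of the patch): the conversion is purely
mechanical, every per_cpu(schedule_data, cpu).field access becomes
per_cpu(sched_res, cpu)->field. The stand-alone C sketch below mimics the
new layout, where all per-cpu scheduler state is reached through one
dynamically allocated struct sched_resource. NR_CPUS, the array-based
per_cpu() stand-in and the reduced set of struct members are assumptions
made only for this sketch.

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

/* reduced stand-in for the merged struct in xen/include/xen/sched-if.h */
struct sched_resource {
    void     *sched_priv;    /* scheduler private per-cpu data */
    unsigned  processor;     /* cpu owning this resource       */
};

/* stand-in for DEFINE_PER_CPU(struct sched_resource *, sched_res) */
static struct sched_resource *sched_res[NR_CPUS];

/* roughly what cpu_schedule_up() does after this patch */
static int cpu_schedule_up(unsigned int cpu)
{
    struct sched_resource *sd = calloc(1, sizeof(*sd));

    if ( sd == NULL )
        return -1;
    sd->processor = cpu;
    sched_res[cpu] = sd;

    return 0;
}

int main(void)
{
    unsigned int cpu;

    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
        if ( cpu_schedule_up(cpu) )
            return 1;

    /* accesses now go through '->' on the pointer, not '.' on a static object */
    printf("cpu 1 is served by the sched_resource of processor %u\n",
           sched_res[1]->processor);

    return 0;
}
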
 xen/common/sched_arinc653.c   |  6 ++---
 xen/common/sched_credit.c     | 14 +++++------
 xen/common/sched_credit2.c    | 24 +++++++++----------
 xen/common/sched_null.c       |  8 +++----
 xen/common/sched_rt.c         | 12 +++++-----
 xen/common/schedule.c         | 56 +++++++++++++++++++++----------------------
 xen/include/asm-x86/cpuidle.h |  2 +-
 xen/include/xen/sched-if.h    | 20 +++++++---------
 8 files changed, 68 insertions(+), 74 deletions(-)

diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index 5701baf337..9dc1ff6a73 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -475,7 +475,7 @@ a653sched_item_sleep(const struct scheduler *ops, struct sched_item *item)
      * If the VCPU being put to sleep is the same one that is currently
      * running, raise a softirq to invoke the scheduler to switch domains.
      */
-    if ( per_cpu(schedule_data, vc->processor).curr == item )
+    if ( per_cpu(sched_res, vc->processor)->curr == item )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
 }
 
@@ -642,7 +642,7 @@ static void
 a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                   void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = per_cpu(sched_res, cpu);
     arinc653_vcpu_t *svc = vdata;
 
     ASSERT(!pdata && svc && is_idle_vcpu(svc->vc));
@@ -650,7 +650,7 @@ a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     idle_vcpu[cpu]->sched_item->priv = vdata;
 
     per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = NULL; /* no pdata */
+    per_cpu(sched_res, cpu)->sched_priv = NULL; /* no pdata */
 
     /*
      * (Re?)route the lock to its default location. We actually do not use
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 6552d4c087..e8369b3648 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -82,7 +82,7 @@
 #define CSCHED_PRIV(_ops)   \
     ((struct csched_private *)((_ops)->sched_data))
 #define CSCHED_PCPU(_c)     \
-    ((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
+    ((struct csched_pcpu *)per_cpu(sched_res, _c)->sched_priv)
 #define CSCHED_ITEM(item)   ((struct csched_item *) (item)->priv)
 #define CSCHED_DOM(_dom)    ((struct csched_dom *) (_dom)->sched_priv)
 #define RUNQ(_cpu)          (&(CSCHED_PCPU(_cpu)->runq))
@@ -248,7 +248,7 @@ static inline bool_t is_runq_idle(unsigned int cpu)
     /*
      * We're peeking at cpu's runq, we must hold the proper lock.
      */
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
 
     return list_empty(RUNQ(cpu)) ||
            is_idle_vcpu(__runq_elem(RUNQ(cpu)->next)->vcpu);
@@ -257,7 +257,7 @@ static inline bool_t is_runq_idle(unsigned int cpu)
 static inline void
 inc_nr_runnable(unsigned int cpu)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
     CSCHED_PCPU(cpu)->nr_runnable++;
 
 }
@@ -265,7 +265,7 @@ inc_nr_runnable(unsigned int cpu)
 static inline void
 dec_nr_runnable(unsigned int cpu)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
     ASSERT(CSCHED_PCPU(cpu)->nr_runnable >= 1);
     CSCHED_PCPU(cpu)->nr_runnable--;
 }
@@ -615,7 +615,7 @@ csched_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 {
     unsigned long flags;
     struct csched_private *prv = CSCHED_PRIV(ops);
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = per_cpu(sched_res, cpu);
 
     /*
      * This is called either during during boot, resume or hotplug, in
@@ -635,7 +635,7 @@ static void
 csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                     void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = per_cpu(sched_res, cpu);
     struct csched_private *prv = CSCHED_PRIV(new_ops);
     struct csched_item *svc = vdata;
 
@@ -654,7 +654,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     spin_unlock(&prv->lock);
 
     per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = pdata;
+    per_cpu(sched_res, cpu)->sched_priv = pdata;
 
     /*
      * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 5a3a0babab..df0e7282ce 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -567,7 +567,7 @@ static inline struct csched2_private *csched2_priv(const struct scheduler *ops)
 
 static inline struct csched2_pcpu *csched2_pcpu(unsigned int cpu)
 {
-    return per_cpu(schedule_data, cpu).sched_priv;
+    return per_cpu(sched_res, cpu)->sched_priv;
 }
 
 static inline struct csched2_item *csched2_item(const struct sched_item *item)
@@ -1276,7 +1276,7 @@ runq_insert(const struct scheduler *ops, struct csched2_item *svc)
     struct list_head * runq = &c2rqd(ops, cpu)->runq;
     int pos = 0;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
 
     ASSERT(!vcpu_on_runq(svc));
     ASSERT(c2r(cpu) == c2r(svc->vcpu->processor));
@@ -1797,7 +1797,7 @@ static bool vcpu_grab_budget(struct csched2_item *svc)
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
 
     if ( svc->budget > 0 )
         return true;
@@ -1844,7 +1844,7 @@ vcpu_return_budget(struct csched2_item *svc, struct list_head *parked)
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
     ASSERT(list_empty(parked));
 
     /* budget_lock nests inside runqueue lock. */
@@ -2101,7 +2101,7 @@ csched2_item_wake(const struct scheduler *ops, struct sched_item *item)
     unsigned int cpu = vc->processor;
     s_time_t now;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
 
     ASSERT(!is_idle_vcpu(vc));
 
@@ -2229,7 +2229,7 @@ csched2_res_pick(const struct scheduler *ops, struct sched_item *item)
      * just grab the prv lock.  Instead, we'll have to trylock, and
      * do something else reasonable if we fail.
      */
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
 
     if ( !read_trylock(&prv->lock) )
     {
@@ -2569,7 +2569,7 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
      * on either side may be empty).
      */
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
     st.lrqd = c2rqd(ops, cpu);
 
     update_runq_load(ops, st.lrqd, 0, now);
@@ -3475,7 +3475,7 @@ csched2_schedule(
     rqd = c2rqd(ops, cpu);
     BUG_ON(!cpumask_test_cpu(cpu, &rqd->active));
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
 
     BUG_ON(!is_idle_vcpu(scurr->vcpu) && scurr->rqd != rqd);
 
@@ -3864,7 +3864,7 @@ csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 
     rqi = init_pdata(prv, pdata, cpu);
     /* Move the scheduler lock to the new runq lock. */
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->rqd[rqi].lock;
+    per_cpu(sched_res, cpu)->schedule_lock = &prv->rqd[rqi].lock;
 
     /* _Not_ pcpu_schedule_unlock(): schedule_lock may have changed! */
     spin_unlock(old_lock);
@@ -3903,10 +3903,10 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * this scheduler, and so it's safe to have taken it /before/ our
      * private global lock.
      */
-    ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->rqd[rqi].lock);
+    ASSERT(per_cpu(sched_res, cpu)->schedule_lock != &prv->rqd[rqi].lock);
 
     per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = pdata;
+    per_cpu(sched_res, cpu)->sched_priv = pdata;
 
     /*
      * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
@@ -3914,7 +3914,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * taking it, find all the initializations we've done above in place.
      */
     smp_mb();
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->rqd[rqi].lock;
+    per_cpu(sched_res, cpu)->schedule_lock = &prv->rqd[rqi].lock;
 
     write_unlock(&prv->lock);
 }
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index f7a2650c48..a9cfa163b9 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -168,7 +168,7 @@ static void init_pdata(struct null_private *prv, unsigned int cpu)
 static void null_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 {
     struct null_private *prv = null_priv(ops);
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = per_cpu(sched_res, cpu);
 
     /* alloc_pdata is not implemented, so we want this to be NULL. */
     ASSERT(!pdata);
@@ -277,7 +277,7 @@ pick_res(struct null_private *prv, struct sched_item *item)
     unsigned int cpu = v->processor, new_cpu;
     cpumask_t *cpus = cpupool_domain_cpumask(v->domain);
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
 
     for_each_affinity_balance_step( bs )
     {
@@ -388,7 +388,7 @@ static void vcpu_deassign(struct null_private *prv, struct vcpu *v,
 static void null_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                               void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = per_cpu(sched_res, cpu);
     struct null_private *prv = null_priv(new_ops);
     struct null_item *nvc = vdata;
 
@@ -406,7 +406,7 @@ static void null_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     init_pdata(prv, cpu);
 
     per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = pdata;
+    per_cpu(sched_res, cpu)->sched_priv = pdata;
 
     /*
      * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index a3cd00f765..0019646b52 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -75,7 +75,7 @@
 /*
  * Locking:
  * A global system lock is used to protect the RunQ and DepletedQ.
- * The global lock is referenced by schedule_data.schedule_lock
+ * The global lock is referenced by sched_res->schedule_lock
  * from all physical cpus.
  *
  * The lock is already grabbed when calling wake/sleep/schedule/ functions
@@ -176,7 +176,7 @@ static void repl_timer_handler(void *data);
 
 /*
  * System-wide private data, include global RunQueue/DepletedQ
- * Global lock is referenced by schedule_data.schedule_lock from all
+ * Global lock is referenced by sched_res->schedule_lock from all
  * physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
  */
 struct rt_private {
@@ -723,7 +723,7 @@ rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
     }
 
     /* Move the scheduler lock to our global runqueue lock.  */
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->lock;
+    per_cpu(sched_res, cpu)->schedule_lock = &prv->lock;
 
     /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
     spin_unlock_irqrestore(old_lock, flags);
@@ -745,7 +745,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * another scheduler, but that is how things need to be, for
      * preventing races.
      */
-    ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->lock);
+    ASSERT(per_cpu(sched_res, cpu)->schedule_lock != &prv->lock);
 
     /*
      * If we are the absolute first cpu being switched toward this
@@ -763,7 +763,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 
     idle_vcpu[cpu]->sched_item->priv = vdata;
     per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = NULL; /* no pdata */
+    per_cpu(sched_res, cpu)->sched_priv = NULL; /* no pdata */
 
     /*
      * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
@@ -771,7 +771,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * taking it, find all the initializations we've done above in place.
      */
     smp_mb();
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->lock;
+    per_cpu(sched_res, cpu)->schedule_lock = &prv->lock;
 }
 
 static void
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index b1eb77290a..51db98bcaa 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -61,7 +61,6 @@ static void vcpu_singleshot_timer_fn(void *data);
 static void poll_timer_fn(void *data);
 
 /* This is global for now so that private implementations can reach it */
-DEFINE_PER_CPU(struct schedule_data, schedule_data);
 DEFINE_PER_CPU(struct scheduler *, scheduler);
 DEFINE_PER_CPU(struct sched_resource *, sched_res);
 
@@ -157,7 +156,7 @@ static inline void vcpu_urgent_count_update(struct vcpu *v)
              !test_bit(v->vcpu_id, v->domain->poll_mask) )
         {
             v->is_urgent = 0;
-            atomic_dec(&per_cpu(schedule_data,v->processor).urgent_count);
+            atomic_dec(&per_cpu(sched_res, v->processor)->urgent_count);
         }
     }
     else
@@ -166,7 +165,7 @@ static inline void vcpu_urgent_count_update(struct vcpu *v)
              unlikely(test_bit(v->vcpu_id, v->domain->poll_mask)) )
         {
             v->is_urgent = 1;
-            atomic_inc(&per_cpu(schedule_data,v->processor).urgent_count);
+            atomic_inc(&per_cpu(sched_res, v->processor)->urgent_count);
         }
     }
 }
@@ -177,7 +176,7 @@ static inline void vcpu_runstate_change(
     s_time_t delta;
 
     ASSERT(v->runstate.state != new_state);
-    ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, v->processor)->schedule_lock));
 
     vcpu_urgent_count_update(v);
 
@@ -334,7 +333,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     /* Idle VCPUs are scheduled immediately, so don't put them in runqueue. */
     if ( is_idle_domain(d) )
     {
-        per_cpu(schedule_data, v->processor).curr = item;
+        per_cpu(sched_res, v->processor)->curr = item;
         v->is_running = 1;
     }
     else
@@ -459,7 +458,7 @@ void sched_destroy_vcpu(struct vcpu *v)
     kill_timer(&v->singleshot_timer);
     kill_timer(&v->poll_timer);
     if ( test_and_clear_bool(v->is_urgent) )
-        atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
+        atomic_dec(&per_cpu(sched_res, v->processor)->urgent_count);
     sched_remove_item(vcpu_scheduler(v), item);
     sched_free_vdata(vcpu_scheduler(v), item->priv);
     sched_free_item(item);
@@ -506,7 +505,7 @@ void sched_destroy_domain(struct domain *d)
 
 void vcpu_sleep_nosync_locked(struct vcpu *v)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, v->processor)->schedule_lock));
 
     if ( likely(!vcpu_runnable(v)) )
     {
@@ -601,8 +600,8 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
      */
     if ( unlikely(v->is_urgent) && (old_cpu != new_cpu) )
     {
-        atomic_inc(&per_cpu(schedule_data, new_cpu).urgent_count);
-        atomic_dec(&per_cpu(schedule_data, old_cpu).urgent_count);
+        atomic_inc(&per_cpu(sched_res, new_cpu)->urgent_count);
+        atomic_dec(&per_cpu(sched_res, old_cpu)->urgent_count);
     }
 
     /*
@@ -668,20 +667,20 @@ static void vcpu_migrate_finish(struct vcpu *v)
          * are not correct any longer after evaluating old and new cpu holding
          * the locks.
          */
-        old_lock = per_cpu(schedule_data, old_cpu).schedule_lock;
-        new_lock = per_cpu(schedule_data, new_cpu).schedule_lock;
+        old_lock = per_cpu(sched_res, old_cpu)->schedule_lock;
+        new_lock = per_cpu(sched_res, new_cpu)->schedule_lock;
 
         sched_spin_lock_double(old_lock, new_lock, &flags);
 
         old_cpu = v->processor;
-        if ( old_lock == per_cpu(schedule_data, old_cpu).schedule_lock )
+        if ( old_lock == per_cpu(sched_res, old_cpu)->schedule_lock )
         {
             /*
              * If we selected a CPU on the previosu iteration, check if it
              * remains suitable for running this vCPU.
              */
             if ( pick_called &&
-                 (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
+                 (new_lock == per_cpu(sched_res, new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->cpu_hard_affinity) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
@@ -689,7 +688,7 @@ static void vcpu_migrate_finish(struct vcpu *v)
             /* Select a new CPU. */
             new_cpu = sched_pick_resource(vcpu_scheduler(v),
                                           v->sched_item)->processor;
-            if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
+            if ( (new_lock == per_cpu(sched_res, new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
             pick_called = 1;
@@ -1472,7 +1471,7 @@ static void schedule(void)
     struct scheduler     *sched;
     unsigned long        *tasklet_work = &this_cpu(tasklet_work_to_do);
     bool_t                tasklet_work_scheduled = 0;
-    struct schedule_data *sd;
+    struct sched_resource *sd;
     spinlock_t           *lock;
     struct task_slice     next_slice;
     int cpu = smp_processor_id();
@@ -1481,7 +1480,7 @@ static void schedule(void)
 
     SCHED_STAT_CRANK(sched_run);
 
-    sd = &this_cpu(schedule_data);
+    sd = this_cpu(sched_res);
 
     /* Update tasklet scheduling status. */
     switch ( *tasklet_work )
@@ -1623,15 +1622,14 @@ static void poll_timer_fn(void *data)
 
 static int cpu_schedule_up(unsigned int cpu)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd;
     void *sched_priv;
-    struct sched_resource *res;
 
-    res = xzalloc(struct sched_resource);
-    if ( res == NULL )
+    sd = xzalloc(struct sched_resource);
+    if ( sd == NULL )
         return -ENOMEM;
-    res->processor = cpu;
-    per_cpu(sched_res, cpu) = res;
+    sd->processor = cpu;
+    per_cpu(sched_res, cpu) = sd;
 
     per_cpu(scheduler, cpu) = &ops;
     spin_lock_init(&sd->_lock);
@@ -1687,7 +1685,7 @@ static int cpu_schedule_up(unsigned int cpu)
 
 static void cpu_schedule_down(unsigned int cpu)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = per_cpu(sched_res, cpu);
     struct scheduler *sched = per_cpu(scheduler, cpu);
 
     sched_free_pdata(sched, sd->sched_priv, cpu);
@@ -1707,7 +1705,7 @@ static int cpu_schedule_callback(
 {
     unsigned int cpu = (unsigned long)hcpu;
     struct scheduler *sched = per_cpu(scheduler, cpu);
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = per_cpu(sched_res, cpu);
     int rc = 0;
 
     /*
@@ -1840,10 +1838,10 @@ void __init scheduler_init(void)
     idle_domain->max_vcpus = nr_cpu_ids;
     if ( vcpu_create(idle_domain, 0, 0) == NULL )
         BUG();
-    this_cpu(schedule_data).curr = idle_vcpu[0]->sched_item;
-    this_cpu(schedule_data).sched_priv = sched_alloc_pdata(&ops, 0);
-    BUG_ON(IS_ERR(this_cpu(schedule_data).sched_priv));
-    sched_init_pdata(&ops, this_cpu(schedule_data).sched_priv, 0);
+    this_cpu(sched_res)->curr = idle_vcpu[0]->sched_item;
+    this_cpu(sched_res)->sched_priv = sched_alloc_pdata(&ops, 0);
+    BUG_ON(IS_ERR(this_cpu(sched_res)->sched_priv));
+    sched_init_pdata(&ops, this_cpu(sched_res)->sched_priv, 0);
 }
 
 /*
@@ -1923,7 +1921,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     old_lock = pcpu_schedule_lock_irq(cpu);
 
     vpriv_old = idle->sched_item->priv;
-    ppriv_old = per_cpu(schedule_data, cpu).sched_priv;
+    ppriv_old = per_cpu(sched_res, cpu)->sched_priv;
     sched_switch_sched(new_ops, cpu, ppriv, vpriv);
 
     /* _Not_ pcpu_schedule_unlock(): schedule_lock may have changed! */
diff --git a/xen/include/asm-x86/cpuidle.h b/xen/include/asm-x86/cpuidle.h
index 08da01803f..f520145752 100644
--- a/xen/include/asm-x86/cpuidle.h
+++ b/xen/include/asm-x86/cpuidle.h
@@ -33,7 +33,7 @@ void update_last_cx_stat(struct acpi_processor_power *,
  */
 static inline int sched_has_urgent_vcpu(void)
 {
-    return atomic_read(&this_cpu(schedule_data).urgent_count);
+    return atomic_read(&this_cpu(sched_res)->urgent_count);
 }
 
 #endif /* __X86_ASM_CPUIDLE_H__ */
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 9a0cd04af7..93617f0459 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -33,22 +33,18 @@ extern int sched_ratelimit_us;
  * For cache betterness, keep the actual lock in the same cache area
  * as the rest of the struct.  Just have the scheduler point to the
  * one it wants (This may be the one right in front of it).*/
-struct schedule_data {
+struct sched_resource {
     spinlock_t         *schedule_lock,
                        _lock;
     struct sched_item  *curr;           /* current task                    */
     void               *sched_priv;
     struct timer        s_timer;        /* scheduling timer                */
     atomic_t            urgent_count;   /* how many urgent vcpus           */
+    unsigned            processor;
 };
 
-#define curr_on_cpu(c)    (per_cpu(schedule_data, c).curr)
-
-struct sched_resource {
-    unsigned     processor;
-};
+#define curr_on_cpu(c)    (per_cpu(sched_res, c)->curr)
 
-DECLARE_PER_CPU(struct schedule_data, schedule_data);
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
 DECLARE_PER_CPU(struct sched_resource *, sched_res);
@@ -69,7 +65,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
 { \
     for ( ; ; ) \
     { \
-        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
+        spinlock_t *lock = per_cpu(sched_res, cpu)->schedule_lock; \
         /* \
          * v->processor may change when grabbing the lock; but \
          * per_cpu(v->processor) may also change, if changing cpu pool \
@@ -79,7 +75,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
          * lock may be the same; this will succeed in that case. \
          */ \
         spin_lock##irq(lock, ## arg); \
-        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
+        if ( likely(lock == per_cpu(sched_res, cpu)->schedule_lock) ) \
             return lock; \
         spin_unlock##irq(lock, ## arg); \
     } \
@@ -89,7 +85,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
 static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
                                                EXTRA_TYPE(arg), param) \
 { \
-    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
+    ASSERT(lock == per_cpu(sched_res, cpu)->schedule_lock); \
     spin_unlock##irq(lock, ## arg); \
 }
 
@@ -118,11 +114,11 @@ sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
 
 static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
 {
-    spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;
+    spinlock_t *lock = per_cpu(sched_res, cpu)->schedule_lock;
 
     if ( !spin_trylock(lock) )
         return NULL;
-    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+    if ( lock == per_cpu(sched_res, cpu)->schedule_lock )
         return lock;
     spin_unlock(lock);
     return NULL;
-- 
2.16.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

2019-05-06  6:56 ` [PATCH RFC V2 37/45] x86: optimize loading of GDT at context switch Juergen Gross
2019-05-06  6:56   ` [Xen-devel] " Juergen Gross
2019-05-16 12:42   ` Jan Beulich
2019-05-16 12:42     ` [Xen-devel] " Jan Beulich
     [not found]   ` <5CDD5AC2020000780022FA6A@suse.com>
2019-05-16 13:10     ` Juergen Gross
2019-05-16 13:10       ` [Xen-devel] " Juergen Gross
2019-05-06  6:56 ` [PATCH RFC V2 38/45] xen/sched: modify cpupool_domain_cpumask() to be an item mask Juergen Gross
2019-05-06  6:56   ` [Xen-devel] " Juergen Gross
2019-05-06  6:56 ` [PATCH RFC V2 39/45] xen/sched: support allocating multiple vcpus into one sched item Juergen Gross
2019-05-06  6:56   ` [Xen-devel] " Juergen Gross
2019-05-06  6:56 ` [PATCH RFC V2 40/45] xen/sched: add a scheduler_percpu_init() function Juergen Gross
2019-05-06  6:56   ` [Xen-devel] " Juergen Gross
2019-05-06  6:56 ` [PATCH RFC V2 41/45] xen/sched: add a percpu resource index Juergen Gross
2019-05-06  6:56   ` [Xen-devel] " Juergen Gross
2019-05-06  6:56 ` [PATCH RFC V2 42/45] xen/sched: add fall back to idle vcpu when scheduling item Juergen Gross
2019-05-06  6:56   ` [Xen-devel] " Juergen Gross
2019-05-16 13:05   ` Jan Beulich
2019-05-16 13:05     ` [Xen-devel] " Jan Beulich
     [not found]   ` <5CDD6005020000780022FA9A@suse.com>
2019-05-16 13:51     ` Juergen Gross
2019-05-16 13:51       ` [Xen-devel] " Juergen Gross
2019-05-16 14:41       ` Jan Beulich
2019-05-16 14:41         ` [Xen-devel] " Jan Beulich
     [not found]       ` <5CDD7693020000780022FC59@suse.com>
2019-05-17  5:13         ` Juergen Gross
2019-05-17  5:13           ` [Xen-devel] " Juergen Gross
2019-05-17  6:57           ` Jan Beulich
2019-05-17  6:57             ` [Xen-devel] " Jan Beulich
     [not found]           ` <5CDE5B4E020000780022FEFC@suse.com>
2019-05-17  7:48             ` Juergen Gross
2019-05-17  7:48               ` [Xen-devel] " Juergen Gross
2019-05-17  8:22               ` Jan Beulich
2019-05-17  8:22                 ` [Xen-devel] " Jan Beulich
2019-05-06  6:56 ` [PATCH RFC V2 43/45] xen/sched: make vcpu_wake() and vcpu_sleep() core scheduling aware Juergen Gross
2019-05-06  6:56   ` [Xen-devel] " Juergen Gross
2019-05-06  6:56 ` [PATCH RFC V2 44/45] xen/sched: carve out freeing sched_item memory into dedicated function Juergen Gross
2019-05-06  6:56   ` [Xen-devel] " Juergen Gross
2019-05-06  6:56 ` [PATCH RFC V2 45/45] xen/sched: add scheduling granularity enum Juergen Gross
2019-05-06  6:56   ` [Xen-devel] " Juergen Gross
2019-05-06  8:57   ` Jan Beulich
2019-05-06  8:57     ` [Xen-devel] " Jan Beulich
     [not found]   ` <5CCFF6F1020000780022C12B@suse.com>
     [not found]     ` <ac57c420-a72e-7570-db8f-27e4693c2755@suse.com>
2019-05-06  9:23     ` Juergen Gross
2019-05-06  9:23       ` [Xen-devel] " Juergen Gross
2019-05-06 10:01       ` Jan Beulich
2019-05-06 10:01         ` [Xen-devel] " Jan Beulich
2019-05-08 14:36         ` Juergen Gross
2019-05-08 14:36           ` [Xen-devel] " Juergen Gross
2019-05-10  8:53           ` Jan Beulich
2019-05-10  8:53             ` [Xen-devel] " Jan Beulich
     [not found]           ` <5CD53C1C020000780022D706@suse.com>
2019-05-10  9:00             ` Juergen Gross
2019-05-10  9:00               ` [Xen-devel] " Juergen Gross
2019-05-10 10:29               ` Dario Faggioli
2019-05-10 10:29                 ` [Xen-devel] " Dario Faggioli
2019-05-10 11:17               ` Jan Beulich
2019-05-10 11:17                 ` [Xen-devel] " Jan Beulich
     [not found]       ` <5CD005E7020000780022C1B5@suse.com>
2019-05-06 10:20         ` Juergen Gross
2019-05-06 10:20           ` [Xen-devel] " Juergen Gross
2019-05-06 11:58           ` Jan Beulich
2019-05-06 11:58             ` [Xen-devel] " Jan Beulich
     [not found]           ` <5CD02161020000780022C257@suse.com>
2019-05-06 12:23             ` Juergen Gross
2019-05-06 12:23               ` [Xen-devel] " Juergen Gross
2019-05-06 13:14               ` Jan Beulich
2019-05-06 13:14                 ` [Xen-devel] " Jan Beulich
2019-05-06  7:10 ` [PATCH RFC V2 00/45] xen: add core scheduling support Juergen Gross
2019-05-06  7:10   ` [Xen-devel] " Juergen Gross
     [not found] <20190506065644.7415-1-jgross@suse.com>
     [not found] <20190506065644.7415-1-jgross@suse.com>
     [not found] ` <20190506065644.7415-2-jgross@suse.com>
     [not found]   ` <1d5f7b35-304c-6a86-5f24-67b79de447dc@citrix.com>
     [not found]     ` <2ca22195-9bdb-b040-ce12-df5bb2416038@suse.com>
     [not found]       ` <0ed82a64-58e7-7ce4-afd1-22f621c0d56d@citrix.com>
     [not found]         ` <a3e3370b-a4a9-9654-368b-f8c13b7f9742@suse.com>

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the mbox file offered by the archive for this message, import it
  into your mail client, and reply-to-all from there.

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
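
  One way to do that from the command line (a sketch only, assuming mutt
  as the mail client and /tmp/thread.mbox as a hypothetical path for the
  saved file) is to open the mbox directly:

  mutt -f /tmp/thread.mbox

  Selecting the message and pressing 'g' (group-reply) answers the sender
  and all Cc'd recipients, which matches the reply-to-all step above.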

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190506065644.7415-10-jgross@suse.com \
    --to=jgross@suse.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=dfaggioli@suse.com \
    --cc=george.dunlap@eu.citrix.com \
    --cc=ian.jackson@eu.citrix.com \
    --cc=jbeulich@suse.com \
    --cc=josh.whitehead@dornerworks.com \
    --cc=julien.grall@arm.com \
    --cc=konrad.wilk@oracle.com \
    --cc=mengxu@cis.upenn.edu \
    --cc=robert.vanvossen@dornerworks.com \
    --cc=roger.pau@citrix.com \
    --cc=sstabellini@kernel.org \
    --cc=tim@xen.org \
    --cc=wei.liu2@citrix.com \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html
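
  git send-email also needs a working SMTP configuration before the
  command above can deliver mail. A minimal sketch, assuming a generic
  provider (smtp.example.com and you@example.com are placeholders, not
  values taken from this thread):

  git config --global sendemail.smtpServer smtp.example.com
  git config --global sendemail.smtpServerPort 587
  git config --global sendemail.smtpEncryption tls
  git config --global sendemail.smtpUser you@example.com

  These keys are documented in git-send-email(1); with them in place the
  command above will normally prompt for the SMTP password and send the
  reply threaded under this message.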

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link offered on the archive page.
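
  Such a link carries the threading information in the URL itself. An
  illustrative form only (not the exact link the archive generates), with
  the angle brackets of the message ID percent-encoded:

  mailto:jgross@suse.com?In-Reply-To=%3C20190506065644.7415-10-jgross@suse.com%3E
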
Be sure your reply has a Subject: header at the top and a blank line before the message body.