From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: "Juergen Gross" <jgross@suse.com>,
	"Stefano Stabellini" <sstabellini@kernel.org>,
	"Wei Liu" <wl@xen.org>,
	"Konrad Rzeszutek Wilk" <konrad.wilk@oracle.com>,
	"George Dunlap" <George.Dunlap@eu.citrix.com>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Ian Jackson" <ian.jackson@eu.citrix.com>,
	"Robert VanVossen" <robert.vanvossen@dornerworks.com>,
	"Tim Deegan" <tim@xen.org>, "Julien Grall" <julien.grall@arm.com>,
	"Josh Whitehead" <josh.whitehead@dornerworks.com>,
	"Meng Xu" <mengxu@cis.upenn.edu>,
	"Jan Beulich" <jbeulich@suse.com>,
	"Dario Faggioli" <dfaggioli@suse.com>,
	"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [PATCH 11/60] xen/sched: move per cpu scheduler private data into struct sched_resource
Date: Tue, 28 May 2019 12:32:24 +0200
Message-ID: <20190528103313.1343-12-jgross@suse.com>
In-Reply-To: <20190528103313.1343-1-jgross@suse.com>

This prepares support for larger scheduling granularities, e.g. core
scheduling.

While at it, move sched_has_urgent_vcpu() from include/asm-x86/cpuidle.h
into schedule.c, removing the need to include sched-if.h in cpuidle.h
and multiple other C sources.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V1: move sched_has_urgent_vcpu()
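
Illustration only, not part of this patch: with the declaration now in
xen/sched.h, cpuidle-style code can call the helper without pulling in
sched-if.h internals. The function below is hypothetical and merely
sketches the intended usage pattern:

    /* Hypothetical caller: choose a shallow C-state while an urgent
     * vcpu (one polling an event channel) exists on this CPU. */
    static unsigned int choose_cstate(unsigned int deepest)
    {
        return sched_has_urgent_vcpu() ? 1 : deepest;
    }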
---
 xen/arch/x86/acpi/cpu_idle.c      |  1 -
 xen/arch/x86/cpu/mcheck/mce.c     |  1 -
 xen/arch/x86/cpu/mcheck/mctelem.c |  1 -
 xen/arch/x86/setup.c              |  1 -
 xen/arch/x86/smpboot.c            |  1 -
 xen/common/sched_arinc653.c       |  4 +--
 xen/common/sched_credit.c         | 12 +++----
 xen/common/sched_credit2.c        | 21 +++++++------
 xen/common/sched_null.c           |  6 ++--
 xen/common/sched_rt.c             |  9 +++---
 xen/common/schedule.c             | 66 ++++++++++++++++++++++-----------------
 xen/include/asm-x86/cpuidle.h     | 11 -------
 xen/include/xen/sched-if.h        | 20 +++++-------
 xen/include/xen/sched.h           |  1 +
 14 files changed, 73 insertions(+), 82 deletions(-)

diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c
index 8846722bca..9f66f70986 100644
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -38,7 +38,6 @@
 #include <xen/guest_access.h>
 #include <xen/keyhandler.h>
 #include <xen/trace.h>
-#include <xen/sched-if.h>
 #include <xen/irq.h>
 #include <asm/cache.h>
 #include <asm/io.h>
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 30cdb06401..726db75180 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -10,7 +10,6 @@
 #include <xen/errno.h>
 #include <xen/console.h>
 #include <xen/sched.h>
-#include <xen/sched-if.h>
 #include <xen/cpumask.h>
 #include <xen/event.h>
 #include <xen/guest_access.h>
diff --git a/xen/arch/x86/cpu/mcheck/mctelem.c b/xen/arch/x86/cpu/mcheck/mctelem.c
index 3bb13e5265..012a9b95e5 100644
--- a/xen/arch/x86/cpu/mcheck/mctelem.c
+++ b/xen/arch/x86/cpu/mcheck/mctelem.c
@@ -18,7 +18,6 @@
 #include <xen/smp.h>
 #include <xen/errno.h>
 #include <xen/sched.h>
-#include <xen/sched-if.h>
 #include <xen/cpumask.h>
 #include <xen/event.h>
 
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 0ed94a613a..5ad66667ef 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -3,7 +3,6 @@
 #include <xen/err.h>
 #include <xen/grant_table.h>
 #include <xen/sched.h>
-#include <xen/sched-if.h>
 #include <xen/domain.h>
 #include <xen/serial.h>
 #include <xen/softirq.h>
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 274865a705..153bfbb4b7 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -25,7 +25,6 @@
 #include <xen/domain.h>
 #include <xen/domain_page.h>
 #include <xen/sched.h>
-#include <xen/sched-if.h>
 #include <xen/irq.h>
 #include <xen/delay.h>
 #include <xen/softirq.h>
diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index 7f26e6a0b0..6e7b2c9968 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -475,7 +475,7 @@ a653sched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
      * If the VCPU being put to sleep is the same one that is currently
      * running, raise a softirq to invoke the scheduler to switch domains.
      */
-    if ( per_cpu(schedule_data, vc->processor).curr == unit )
+    if ( get_sched_res(vc->processor)->curr == unit )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
 }
 
@@ -642,7 +642,7 @@ static spinlock_t *
 a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                   void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     arinc653_vcpu_t *svc = vdata;
 
     ASSERT(!pdata && svc && is_idle_vcpu(svc->vc));
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index bc87b813dc..fe4fc5abb1 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -82,7 +82,7 @@
 #define CSCHED_PRIV(_ops)   \
     ((struct csched_private *)((_ops)->sched_data))
 #define CSCHED_PCPU(_c)     \
-    ((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
+    ((struct csched_pcpu *)get_sched_res(_c)->sched_priv)
 #define CSCHED_UNIT(unit)   ((struct csched_unit *) (unit)->priv)
 #define CSCHED_DOM(_dom)    ((struct csched_dom *) (_dom)->sched_priv)
 #define RUNQ(_cpu)          (&(CSCHED_PCPU(_cpu)->runq))
@@ -248,7 +248,7 @@ static inline bool_t is_runq_idle(unsigned int cpu)
     /*
      * We're peeking at cpu's runq, we must hold the proper lock.
      */
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     return list_empty(RUNQ(cpu)) ||
            is_idle_vcpu(__runq_elem(RUNQ(cpu)->next)->vcpu);
@@ -257,7 +257,7 @@ static inline bool_t is_runq_idle(unsigned int cpu)
 static inline void
 inc_nr_runnable(unsigned int cpu)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     CSCHED_PCPU(cpu)->nr_runnable++;
 
 }
@@ -265,7 +265,7 @@ inc_nr_runnable(unsigned int cpu)
 static inline void
 dec_nr_runnable(unsigned int cpu)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     ASSERT(CSCHED_PCPU(cpu)->nr_runnable >= 1);
     CSCHED_PCPU(cpu)->nr_runnable--;
 }
@@ -615,7 +615,7 @@ csched_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 {
     unsigned long flags;
     struct csched_private *prv = CSCHED_PRIV(ops);
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
 
     /*
      * This is called either during boot, resume or hotplug, in
@@ -635,7 +635,7 @@ static spinlock_t *
 csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                     void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     struct csched_private *prv = CSCHED_PRIV(new_ops);
     struct csched_unit *svc = vdata;
 
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 36201815e3..99e993b32f 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -567,7 +567,7 @@ static inline struct csched2_private *csched2_priv(const struct scheduler *ops)
 
 static inline struct csched2_pcpu *csched2_pcpu(unsigned int cpu)
 {
-    return per_cpu(schedule_data, cpu).sched_priv;
+    return get_sched_res(cpu)->sched_priv;
 }
 
 static inline struct csched2_unit *csched2_unit(const struct sched_unit *unit)
@@ -1276,7 +1276,7 @@ runq_insert(const struct scheduler *ops, struct csched2_unit *svc)
     struct list_head * runq = &c2rqd(ops, cpu)->runq;
     int pos = 0;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     ASSERT(!vcpu_on_runq(svc));
     ASSERT(c2r(cpu) == c2r(svc->vcpu->processor));
@@ -1797,7 +1797,7 @@ static bool vcpu_grab_budget(struct csched2_unit *svc)
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     if ( svc->budget > 0 )
         return true;
@@ -1844,7 +1844,7 @@ vcpu_return_budget(struct csched2_unit *svc, struct list_head *parked)
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     ASSERT(list_empty(parked));
 
     /* budget_lock nests inside runqueue lock. */
@@ -2101,7 +2101,7 @@ csched2_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
     unsigned int cpu = vc->processor;
     s_time_t now;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     ASSERT(!is_idle_vcpu(vc));
 
@@ -2229,7 +2229,7 @@ csched2_res_pick(const struct scheduler *ops, struct sched_unit *unit)
      * just grab the prv lock.  Instead, we'll have to trylock, and
      * do something else reasonable if we fail.
      */
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     if ( !read_trylock(&prv->lock) )
     {
@@ -2569,7 +2569,7 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
      * on either side may be empty).
      */
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     st.lrqd = c2rqd(ops, cpu);
 
     update_runq_load(ops, st.lrqd, 0, now);
@@ -3475,7 +3475,7 @@ csched2_schedule(
     rqd = c2rqd(ops, cpu);
     BUG_ON(!cpumask_test_cpu(cpu, &rqd->active));
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     BUG_ON(!is_idle_vcpu(scurr->vcpu) && scurr->rqd != rqd);
 
@@ -3863,7 +3863,7 @@ csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 
     rqi = init_pdata(prv, pdata, cpu);
     /* Move the scheduler lock to the new runq lock. */
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->rqd[rqi].lock;
+    get_sched_res(cpu)->schedule_lock = &prv->rqd[rqi].lock;
 
     /* _Not_ pcpu_schedule_unlock(): schedule_lock may have changed! */
     spin_unlock(old_lock);
@@ -3877,6 +3877,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 {
     struct csched2_private *prv = csched2_priv(new_ops);
     struct csched2_unit *svc = vdata;
+    struct sched_resource *sd = get_sched_res(cpu);
     unsigned rqi;
 
     ASSERT(pdata && svc && is_idle_vcpu(svc->vcpu));
@@ -3902,7 +3903,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * this scheduler, and so it's safe to have taken it /before/ our
      * private global lock.
      */
-    ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->rqd[rqi].lock);
+    ASSERT(sd->schedule_lock != &prv->rqd[rqi].lock);
 
     write_unlock(&prv->lock);
 
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index defcdfcf1e..9df6f867aa 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -168,7 +168,7 @@ static void init_pdata(struct null_private *prv, unsigned int cpu)
 static void null_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 {
     struct null_private *prv = null_priv(ops);
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
 
     /* alloc_pdata is not implemented, so we want this to be NULL. */
     ASSERT(!pdata);
@@ -277,7 +277,7 @@ pick_res(struct null_private *prv, struct sched_unit *unit)
     unsigned int cpu = v->processor, new_cpu;
     cpumask_t *cpus = cpupool_domain_cpumask(v->domain);
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     for_each_affinity_balance_step( bs )
     {
@@ -389,7 +389,7 @@ static spinlock_t *null_switch_sched(struct scheduler *new_ops,
                                      unsigned int cpu,
                                      void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     struct null_private *prv = null_priv(new_ops);
     struct null_unit *nvc = vdata;
 
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 8caec5c5dc..cee0d69d54 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -75,7 +75,7 @@
 /*
  * Locking:
  * A global system lock is used to protect the RunQ and DepletedQ.
- * The global lock is referenced by schedule_data.schedule_lock
+ * The global lock is referenced by sched_res->schedule_lock
  * from all physical cpus.
  *
  * The lock is already grabbed when calling wake/sleep/schedule/ functions
@@ -176,7 +176,7 @@ static void repl_timer_handler(void *data);
 
 /*
  * System-wide private data, include global RunQueue/DepletedQ
- * Global lock is referenced by schedule_data.schedule_lock from all
+ * Global lock is referenced by sched_res->schedule_lock from all
  * physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
  */
 struct rt_private {
@@ -723,7 +723,7 @@ rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
     }
 
     /* Move the scheduler lock to our global runqueue lock.  */
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->lock;
+    get_sched_res(cpu)->schedule_lock = &prv->lock;
 
     /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
     spin_unlock_irqrestore(old_lock, flags);
@@ -736,6 +736,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 {
     struct rt_private *prv = rt_priv(new_ops);
     struct rt_unit *svc = vdata;
+    struct sched_resource *sd = get_sched_res(cpu);
 
     ASSERT(!pdata && svc && is_idle_vcpu(svc->vcpu));
 
@@ -745,7 +746,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * another scheduler, but that is how things need to be, for
      * preventing races.
      */
-    ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->lock);
+    ASSERT(sd->schedule_lock != &prv->lock);
 
     /*
      * If we are the absolute first cpu being switched toward this
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index c0cec0b6ca..ea53bd7183 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -61,7 +61,6 @@ static void vcpu_singleshot_timer_fn(void *data);
 static void poll_timer_fn(void *data);
 
 /* This is global for now so that private implementations can reach it */
-DEFINE_PER_CPU(struct schedule_data, schedule_data);
 DEFINE_PER_CPU(struct scheduler *, scheduler);
 DEFINE_PER_CPU(struct sched_resource *, sched_res);
 
@@ -157,7 +156,7 @@ static inline void vcpu_urgent_count_update(struct vcpu *v)
              !test_bit(v->vcpu_id, v->domain->poll_mask) )
         {
             v->is_urgent = 0;
-            atomic_dec(&per_cpu(schedule_data,v->processor).urgent_count);
+            atomic_dec(&get_sched_res(v->processor)->urgent_count);
         }
     }
     else
@@ -166,7 +165,7 @@ static inline void vcpu_urgent_count_update(struct vcpu *v)
              unlikely(test_bit(v->vcpu_id, v->domain->poll_mask)) )
         {
             v->is_urgent = 1;
-            atomic_inc(&per_cpu(schedule_data,v->processor).urgent_count);
+            atomic_inc(&get_sched_res(v->processor)->urgent_count);
         }
     }
 }
@@ -177,7 +176,7 @@ static inline void vcpu_runstate_change(
     s_time_t delta;
 
     ASSERT(v->runstate.state != new_state);
-    ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(v->processor)->schedule_lock));
 
     vcpu_urgent_count_update(v);
 
@@ -334,7 +333,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     /* Idle VCPUs are scheduled immediately, so don't put them in runqueue. */
     if ( is_idle_domain(d) )
     {
-        per_cpu(schedule_data, v->processor).curr = unit;
+        get_sched_res(v->processor)->curr = unit;
         v->is_running = 1;
     }
     else
@@ -459,7 +458,7 @@ void sched_destroy_vcpu(struct vcpu *v)
     kill_timer(&v->singleshot_timer);
     kill_timer(&v->poll_timer);
     if ( test_and_clear_bool(v->is_urgent) )
-        atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
+        atomic_dec(&get_sched_res(v->processor)->urgent_count);
     sched_remove_unit(vcpu_scheduler(v), unit);
     sched_free_vdata(vcpu_scheduler(v), unit->priv);
     sched_free_unit(unit);
@@ -506,7 +505,7 @@ void sched_destroy_domain(struct domain *d)
 
 void vcpu_sleep_nosync_locked(struct vcpu *v)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(v->processor)->schedule_lock));
 
     if ( likely(!vcpu_runnable(v)) )
     {
@@ -601,8 +600,8 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
      */
     if ( unlikely(v->is_urgent) && (old_cpu != new_cpu) )
     {
-        atomic_inc(&per_cpu(schedule_data, new_cpu).urgent_count);
-        atomic_dec(&per_cpu(schedule_data, old_cpu).urgent_count);
+        atomic_inc(&get_sched_res(new_cpu)->urgent_count);
+        atomic_dec(&get_sched_res(old_cpu)->urgent_count);
     }
 
     /*
@@ -668,20 +667,20 @@ static void vcpu_migrate_finish(struct vcpu *v)
          * are not correct any longer after evaluating old and new cpu holding
          * the locks.
          */
-        old_lock = per_cpu(schedule_data, old_cpu).schedule_lock;
-        new_lock = per_cpu(schedule_data, new_cpu).schedule_lock;
+        old_lock = get_sched_res(old_cpu)->schedule_lock;
+        new_lock = get_sched_res(new_cpu)->schedule_lock;
 
         sched_spin_lock_double(old_lock, new_lock, &flags);
 
         old_cpu = v->processor;
-        if ( old_lock == per_cpu(schedule_data, old_cpu).schedule_lock )
+        if ( old_lock == get_sched_res(old_cpu)->schedule_lock )
         {
             /*
              * If we selected a CPU on the previous iteration, check if it
              * remains suitable for running this vCPU.
              */
             if ( pick_called &&
-                 (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
+                 (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->cpu_hard_affinity) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
@@ -689,7 +688,7 @@ static void vcpu_migrate_finish(struct vcpu *v)
             /* Select a new CPU. */
             new_cpu = sched_pick_resource(vcpu_scheduler(v),
                                           v->sched_unit)->processor;
-            if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
+            if ( (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
             pick_called = 1;
@@ -1472,7 +1471,7 @@ static void schedule(void)
     struct scheduler     *sched;
     unsigned long        *tasklet_work = &this_cpu(tasklet_work_to_do);
     bool_t                tasklet_work_scheduled = 0;
-    struct schedule_data *sd;
+    struct sched_resource *sd;
     spinlock_t           *lock;
     struct task_slice     next_slice;
     int cpu = smp_processor_id();
@@ -1481,7 +1480,7 @@ static void schedule(void)
 
     SCHED_STAT_CRANK(sched_run);
 
-    sd = &this_cpu(schedule_data);
+    sd = get_sched_res(cpu);
 
     /* Update tasklet scheduling status. */
     switch ( *tasklet_work )
@@ -1623,15 +1622,14 @@ static void poll_timer_fn(void *data)
 
 static int cpu_schedule_up(unsigned int cpu)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd;
     void *sched_priv;
-    struct sched_resource *res;
 
-    res = xzalloc(struct sched_resource);
-    if ( res == NULL )
+    sd = xzalloc(struct sched_resource);
+    if ( sd == NULL )
         return -ENOMEM;
-    res->processor = cpu;
-    set_sched_res(cpu, res);
+    sd->processor = cpu;
+    set_sched_res(cpu, sd);
 
     per_cpu(scheduler, cpu) = &ops;
     spin_lock_init(&sd->_lock);
@@ -1687,7 +1685,7 @@ static int cpu_schedule_up(unsigned int cpu)
 
 static void cpu_schedule_down(unsigned int cpu)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     struct scheduler *sched = per_cpu(scheduler, cpu);
 
     sched_free_pdata(sched, sd->sched_priv, cpu);
@@ -1707,7 +1705,7 @@ static int cpu_schedule_callback(
 {
     unsigned int cpu = (unsigned long)hcpu;
     struct scheduler *sched = per_cpu(scheduler, cpu);
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     int rc = 0;
 
     /*
@@ -1864,10 +1862,10 @@ void __init scheduler_init(void)
     idle_domain->max_vcpus = nr_cpu_ids;
     if ( vcpu_create(idle_domain, 0, 0) == NULL )
         BUG();
-    this_cpu(schedule_data).curr = idle_vcpu[0]->sched_unit;
-    this_cpu(schedule_data).sched_priv = sched_alloc_pdata(&ops, 0);
-    BUG_ON(IS_ERR(this_cpu(schedule_data).sched_priv));
-    sched_init_pdata(&ops, this_cpu(schedule_data).sched_priv, 0);
+    get_sched_res(0)->curr = idle_vcpu[0]->sched_unit;
+    get_sched_res(0)->sched_priv = sched_alloc_pdata(&ops, 0);
+    BUG_ON(IS_ERR(get_sched_res(0)->sched_priv));
+    sched_init_pdata(&ops, get_sched_res(0)->sched_priv, 0);
 }
 
 /*
@@ -1888,7 +1886,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     struct scheduler *old_ops = per_cpu(scheduler, cpu);
     struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
     struct cpupool *old_pool = per_cpu(cpupool, cpu);
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     spinlock_t *old_lock, *new_lock;
 
     /*
@@ -2074,6 +2072,16 @@ void wait(void)
     schedule();
 }
 
+/*
+ * A vcpu is urgent if it is polling an event channel.
+ *
+ * If an urgent vcpu exists, the CPU should not enter a deep C-state.
+ */
+int sched_has_urgent_vcpu(void)
+{
+    return atomic_read(&get_sched_res(smp_processor_id())->urgent_count);
+}
+
 #ifdef CONFIG_COMPAT
 #include "compat/schedule.c"
 #endif
diff --git a/xen/include/asm-x86/cpuidle.h b/xen/include/asm-x86/cpuidle.h
index 488f708305..5d7dffd228 100644
--- a/xen/include/asm-x86/cpuidle.h
+++ b/xen/include/asm-x86/cpuidle.h
@@ -4,7 +4,6 @@
 #include <xen/cpuidle.h>
 #include <xen/notifier.h>
 #include <xen/sched.h>
-#include <xen/sched-if.h>
 
 extern struct acpi_processor_power *processor_powers[];
 
@@ -27,14 +26,4 @@ void update_idle_stats(struct acpi_processor_power *,
 void update_last_cx_stat(struct acpi_processor_power *,
                          struct acpi_processor_cx *, uint64_t);
 
-/*
- * vcpu is urgent if vcpu is polling event channel
- *
- * if urgent vcpu exists, CPU should not enter deep C state
- */
-static inline int sched_has_urgent_vcpu(void)
-{
-    return atomic_read(&this_cpu(schedule_data).urgent_count);
-}
-
 #endif /* __X86_ASM_CPUIDLE_H__ */
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 9b7855e775..0443fe1d7e 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -33,22 +33,18 @@ extern int sched_ratelimit_us;
  * For cache betterness, keep the actual lock in the same cache area
  * as the rest of the struct.  Just have the scheduler point to the
  * one it wants (This may be the one right in front of it).*/
-struct schedule_data {
+struct sched_resource {
     spinlock_t         *schedule_lock,
                        _lock;
     struct sched_unit  *curr;           /* current task                    */
     void               *sched_priv;
     struct timer        s_timer;        /* scheduling timer                */
     atomic_t            urgent_count;   /* how many urgent vcpus           */
+    unsigned int        processor;
 };
 
-#define curr_on_cpu(c)    (per_cpu(schedule_data, c).curr)
-
-struct sched_resource {
-    unsigned int processor;
-};
+#define curr_on_cpu(c)    (get_sched_res(c)->curr)
 
-DECLARE_PER_CPU(struct schedule_data, schedule_data);
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
 DECLARE_PER_CPU(struct sched_resource *, sched_res);
@@ -79,7 +75,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
 { \
     for ( ; ; ) \
     { \
-        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
+        spinlock_t *lock = get_sched_res(cpu)->schedule_lock; \
         /* \
          * v->processor may change when grabbing the lock; but \
          * per_cpu(v->processor) may also change, if changing cpu pool \
@@ -89,7 +85,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
          * lock may be the same; this will succeed in that case. \
          */ \
         spin_lock##irq(lock, ## arg); \
-        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
+        if ( likely(lock == get_sched_res(cpu)->schedule_lock) ) \
             return lock; \
         spin_unlock##irq(lock, ## arg); \
     } \
@@ -99,7 +95,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
 static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
                                                EXTRA_TYPE(arg), param) \
 { \
-    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
+    ASSERT(lock == get_sched_res(cpu)->schedule_lock); \
     spin_unlock##irq(lock, ## arg); \
 }
 
@@ -128,11 +124,11 @@ sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
 
 static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
 {
-    spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;
+    spinlock_t *lock = get_sched_res(cpu)->schedule_lock;
 
     if ( !spin_trylock(lock) )
         return NULL;
-    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+    if ( lock == get_sched_res(cpu)->schedule_lock )
         return lock;
     spin_unlock(lock);
     return NULL;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index d3a1a31c86..ee316cddd7 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -900,6 +900,7 @@ int vcpu_pin_override(struct vcpu *v, int cpu);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);
+int sched_has_urgent_vcpu(void);
 
 /*
  * Used by idle loop to decide whether there is work to do:
-- 
2.16.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 28/60] xen/sched: switch schedule() from vcpus to sched_units Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 29/60] xen/sched: switch sched_move_irqs() to take sched_unit as parameter Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 30/60] xen: switch from for_each_vcpu() to for_each_sched_unit() Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 31/60] xen/sched: add runstate counters to struct sched_unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 32/60] xen/sched: rework and rename vcpu_force_reschedule() Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 33/60] xen/sched: Change vcpu_migrate_*() to operate on schedule unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 34/60] xen/sched: move struct task_slice into struct sched_unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 35/60] xen/sched: add code to sync scheduling of all vcpus of a sched unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 36/60] xen/sched: introduce unit_runnable_state() Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 37/60] xen/sched: add support for multiple vcpus per sched unit where missing Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 38/60] x86: make loading of GDT at context switch more modular Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-07-02 15:38   ` Andrew Cooper
2019-05-28 10:32 ` [PATCH 39/60] x86: optimize loading of GDT at context switch Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-07-02 16:09   ` Andrew Cooper
2019-07-03  6:30     ` Juergen Gross
2019-07-03 12:21       ` Andrew Cooper
2019-07-05  7:30         ` Juergen Gross
2019-05-28 10:32 ` [PATCH 40/60] xen/sched: modify cpupool_domain_cpumask() to be an unit mask Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 41/60] xen/sched: support allocating multiple vcpus into one sched unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 42/60] xen/sched: add a scheduler_percpu_init() function Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 43/60] xen/sched: add a percpu resource index Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 44/60] xen/sched: add fall back to idle vcpu when scheduling unit Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 45/60] xen/sched: make vcpu_wake() and vcpu_sleep() core scheduling aware Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:32 ` [PATCH 46/60] xen/sched: carve out freeing sched_unit memory into dedicated function Juergen Gross
2019-05-28 10:32   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 47/60] xen/sched: move per-cpu variable scheduler to struct sched_resource Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 48/60] xen/sched: move per-cpu variable cpupool " Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 49/60] xen/sched: reject switching smt on/off with core scheduling active Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 11:44   ` Jan Beulich
2019-05-28 11:44     ` [Xen-devel] " Jan Beulich
2019-05-28 11:52     ` Juergen Gross
2019-05-28 11:52       ` [Xen-devel] " Juergen Gross
2019-06-12  9:36       ` Dario Faggioli
2019-05-28 10:33 ` [PATCH 50/60] xen/sched: prepare per-cpupool scheduling granularity Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 51/60] xen/sched: use one schedule lock for all free cpus Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 52/60] xen/sched: populate cpupool0 only after all cpus are up Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 53/60] xen/sched: remove cpu from pool0 before removing it Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 54/60] xen/sched: add minimalistic idle scheduler for free cpus Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 11:47   ` Jan Beulich
2019-05-28 11:47     ` [Xen-devel] " Jan Beulich
2019-05-28 11:58     ` Juergen Gross
2019-05-28 11:58       ` [Xen-devel] " Juergen Gross
2019-05-31 14:15       ` Dario Faggioli
2019-05-31 14:15         ` [Xen-devel] " Dario Faggioli
2019-05-31 15:52   ` Dario Faggioli
2019-05-31 15:52     ` [Xen-devel] " Dario Faggioli
2019-05-31 16:44     ` Juergen Gross
2019-05-31 16:44       ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 55/60] xen/sched: split schedule_cpu_switch() Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 56/60] xen/sched: protect scheduling resource via rcu Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 57/60] xen/sched: support multiple cpus per scheduling resource Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 58/60] xen/sched: support differing granularity in schedule_cpu_[add/rm]() Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 59/60] xen/sched: support core scheduling for moving cpus to/from cpupools Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 10:33 ` [PATCH 60/60] xen/sched: add scheduling granularity enum Juergen Gross
2019-05-28 10:33   ` [Xen-devel] " Juergen Gross
2019-05-28 11:51   ` Jan Beulich
2019-05-28 11:51     ` [Xen-devel] " Jan Beulich
2019-05-28 12:02     ` Juergen Gross
2019-05-28 12:02       ` [Xen-devel] " Juergen Gross
2019-07-19 18:31   ` Dario Faggioli
2019-07-05 13:17 ` [Xen-devel] [PATCH 00/60] xen: add core scheduling support Sergey Dyasli
2019-07-05 13:22   ` Juergen Gross
2019-07-05 13:56   ` Dario Faggioli
2019-07-15 14:08     ` Sergey Dyasli
2019-07-18 14:48       ` Juergen Gross
2019-07-18 15:14         ` Sergey Dyasli
2019-07-18 16:04           ` Dario Faggioli
2019-07-19  5:41           ` Juergen Gross
2019-07-19 11:24             ` Juergen Gross
2019-07-19 13:57           ` Juergen Gross
2019-07-22 14:22             ` Sergey Dyasli
2019-07-24  9:13               ` Juergen Gross
2019-07-24 14:54                 ` Sergey Dyasli
2019-07-24 15:11                   ` Juergen Gross
2019-07-16 15:45   ` Sergey Dyasli
2019-07-19 13:35     ` Juergen Gross
2019-07-25 16:01   ` Sergey Dyasli
2019-07-11 13:40 ` Dario Faggioli
     [not found] <20190528103313.1343„1„jgross@suse.com>
     [not found] ` <20190528103313.1343„4„jgross@suse.com>
     [not found] <20190528103313.13431jgross@suse.com>
     [not found] ` <20190528103313.13434jgross@suse.com>

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190528103313.1343-12-jgross@suse.com \
    --to=jgross@suse.com \
    --cc=George.Dunlap@eu.citrix.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=dfaggioli@suse.com \
    --cc=ian.jackson@eu.citrix.com \
    --cc=jbeulich@suse.com \
    --cc=josh.whitehead@dornerworks.com \
    --cc=julien.grall@arm.com \
    --cc=konrad.wilk@oracle.com \
    --cc=mengxu@cis.upenn.edu \
    --cc=robert.vanvossen@dornerworks.com \
    --cc=roger.pau@citrix.com \
    --cc=sstabellini@kernel.org \
    --cc=tim@xen.org \
    --cc=wl@xen.org \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.