From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: "Juergen Gross" <jgross@suse.com>,
	"Stefano Stabellini" <sstabellini@kernel.org>,
	"Wei Liu" <wl@xen.org>,
	"Konrad Rzeszutek Wilk" <konrad.wilk@oracle.com>,
	"George Dunlap" <George.Dunlap@eu.citrix.com>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Ian Jackson" <ian.jackson@eu.citrix.com>,
	"Robert VanVossen" <robert.vanvossen@dornerworks.com>,
	"Tim Deegan" <tim@xen.org>, "Julien Grall" <julien.grall@arm.com>,
	"Josh Whitehead" <josh.whitehead@dornerworks.com>,
	"Meng Xu" <mengxu@cis.upenn.edu>,
	"Jan Beulich" <jbeulich@suse.com>,
	"Dario Faggioli" <dfaggioli@suse.com>,
	"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [Xen-devel] [PATCH v2 07/48] xen/sched: move per cpu scheduler private data into struct sched_resource
Date: Fri,  9 Aug 2019 16:57:52 +0200
Message-ID: <20190809145833.1020-8-jgross@suse.com>
In-Reply-To: <20190809145833.1020-1-jgross@suse.com>

This prepares for supporting larger scheduling granularities, e.g. core
scheduling. All fields of the per-cpu struct schedule_data are folded into
struct sched_resource, so accesses change from per_cpu(schedule_data, cpu)
to get_sched_res(cpu).

While at it, move sched_has_urgent_vcpu() from include/asm-x86/cpuidle.h
into schedule.c, removing the need to include sched-if.h in cpuidle.h and
in several other C source files.
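
For illustration only, a minimal sketch of the resulting access pattern,
assuming get_sched_res() simply returns the per-cpu sched_res pointer
declared in sched-if.h (names as used elsewhere in this series):

    /* Illustrative sketch, not taken verbatim from the patch. */
    static inline struct sched_resource *get_sched_res(unsigned int cpu)
    {
        /* sched_res is the per-cpu pointer set up in cpu_schedule_up(). */
        return per_cpu(sched_res, cpu);
    }

    /* Former per_cpu(schedule_data, cpu).<field> accesses become e.g.: */
    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
    atomic_inc(&get_sched_res(cpu)->urgent_count);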

Signed-off-by: Juergen Gross <jgross@suse.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Dario Faggioli <dfaggioli@suse.com>
---
V1:
- move sched_has_urgent_vcpu()
V2:
- make sched_has_urgent_vcpu() return bool (Jan Beulich)
---
 xen/arch/x86/acpi/cpu_idle.c      |  1 -
 xen/arch/x86/cpu/mcheck/mce.c     |  1 -
 xen/arch/x86/cpu/mcheck/mctelem.c |  1 -
 xen/arch/x86/setup.c              |  1 -
 xen/arch/x86/smpboot.c            |  1 -
 xen/common/sched_arinc653.c       |  4 +--
 xen/common/sched_credit.c         | 10 +++----
 xen/common/sched_credit2.c        | 21 +++++++-------
 xen/common/sched_null.c           |  4 +--
 xen/common/sched_rt.c             |  9 +++---
 xen/common/schedule.c             | 60 ++++++++++++++++++++++-----------------
 xen/include/asm-x86/cpuidle.h     | 11 -------
 xen/include/xen/sched-if.h        | 20 ++++++-------
 xen/include/xen/sched.h           |  1 +
 14 files changed, 68 insertions(+), 77 deletions(-)

diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c
index 8f7b6e9b8c..836f524ef4 100644
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -38,7 +38,6 @@
 #include <xen/guest_access.h>
 #include <xen/keyhandler.h>
 #include <xen/trace.h>
-#include <xen/sched-if.h>
 #include <xen/irq.h>
 #include <asm/cache.h>
 #include <asm/io.h>
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 28ad7dd659..4b2b6de191 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -10,7 +10,6 @@
 #include <xen/errno.h>
 #include <xen/console.h>
 #include <xen/sched.h>
-#include <xen/sched-if.h>
 #include <xen/cpumask.h>
 #include <xen/event.h>
 #include <xen/guest_access.h>
diff --git a/xen/arch/x86/cpu/mcheck/mctelem.c b/xen/arch/x86/cpu/mcheck/mctelem.c
index 3bb13e5265..012a9b95e5 100644
--- a/xen/arch/x86/cpu/mcheck/mctelem.c
+++ b/xen/arch/x86/cpu/mcheck/mctelem.c
@@ -18,7 +18,6 @@
 #include <xen/smp.h>
 #include <xen/errno.h>
 #include <xen/sched.h>
-#include <xen/sched-if.h>
 #include <xen/cpumask.h>
 #include <xen/event.h>
 
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index d2011910fa..28c35bbac9 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -3,7 +3,6 @@
 #include <xen/err.h>
 #include <xen/grant_table.h>
 #include <xen/sched.h>
-#include <xen/sched-if.h>
 #include <xen/domain.h>
 #include <xen/serial.h>
 #include <xen/softirq.h>
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 8d5fef0012..27b78a0b61 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -25,7 +25,6 @@
 #include <xen/domain.h>
 #include <xen/domain_page.h>
 #include <xen/sched.h>
-#include <xen/sched-if.h>
 #include <xen/irq.h>
 #include <xen/delay.h>
 #include <xen/softirq.h>
diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index 34b1f235a2..25fbc42f74 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -475,7 +475,7 @@ a653sched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
      * If the VCPU being put to sleep is the same one that is currently
      * running, raise a softirq to invoke the scheduler to switch domains.
      */
-    if ( per_cpu(schedule_data, vc->processor).curr == unit )
+    if ( get_sched_res(vc->processor)->curr == unit )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
 }
 
@@ -642,7 +642,7 @@ static spinlock_t *
 a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                   void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     arinc653_vcpu_t *svc = vdata;
 
     ASSERT(!pdata && svc && is_idle_vcpu(svc->vc));
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 7c3dbae892..261d2083c7 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -82,7 +82,7 @@
 #define CSCHED_PRIV(_ops)   \
     ((struct csched_private *)((_ops)->sched_data))
 #define CSCHED_PCPU(_c)     \
-    ((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
+    ((struct csched_pcpu *)get_sched_res(_c)->sched_priv)
 #define CSCHED_UNIT(unit)   ((struct csched_unit *) (unit)->priv)
 #define CSCHED_DOM(_dom)    ((struct csched_dom *) (_dom)->sched_priv)
 #define RUNQ(_cpu)          (&(CSCHED_PCPU(_cpu)->runq))
@@ -250,7 +250,7 @@ static inline bool_t is_runq_idle(unsigned int cpu)
     /*
      * We're peeking at cpu's runq, we must hold the proper lock.
      */
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     return list_empty(RUNQ(cpu)) ||
            is_idle_vcpu(__runq_elem(RUNQ(cpu)->next)->vcpu);
@@ -259,7 +259,7 @@ static inline bool_t is_runq_idle(unsigned int cpu)
 static inline void
 inc_nr_runnable(unsigned int cpu)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     CSCHED_PCPU(cpu)->nr_runnable++;
 
 }
@@ -267,7 +267,7 @@ inc_nr_runnable(unsigned int cpu)
 static inline void
 dec_nr_runnable(unsigned int cpu)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     ASSERT(CSCHED_PCPU(cpu)->nr_runnable >= 1);
     CSCHED_PCPU(cpu)->nr_runnable--;
 }
@@ -628,7 +628,7 @@ static spinlock_t *
 csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                     void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     struct csched_private *prv = CSCHED_PRIV(new_ops);
     struct csched_unit *svc = vdata;
 
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index f856293371..02e2855d8d 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -568,7 +568,7 @@ static inline struct csched2_private *csched2_priv(const struct scheduler *ops)
 
 static inline struct csched2_pcpu *csched2_pcpu(unsigned int cpu)
 {
-    return per_cpu(schedule_data, cpu).sched_priv;
+    return get_sched_res(cpu)->sched_priv;
 }
 
 static inline struct csched2_unit *csched2_unit(const struct sched_unit *unit)
@@ -1277,7 +1277,7 @@ runq_insert(const struct scheduler *ops, struct csched2_unit *svc)
     struct list_head * runq = &c2rqd(ops, cpu)->runq;
     int pos = 0;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     ASSERT(!vcpu_on_runq(svc));
     ASSERT(c2r(cpu) == c2r(svc->vcpu->processor));
@@ -1798,7 +1798,7 @@ static bool vcpu_grab_budget(struct csched2_unit *svc)
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     if ( svc->budget > 0 )
         return true;
@@ -1845,7 +1845,7 @@ vcpu_return_budget(struct csched2_unit *svc, struct list_head *parked)
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     ASSERT(list_empty(parked));
 
     /* budget_lock nests inside runqueue lock. */
@@ -2102,7 +2102,7 @@ csched2_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
     unsigned int cpu = vc->processor;
     s_time_t now;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     ASSERT(!is_idle_vcpu(vc));
 
@@ -2230,7 +2230,7 @@ csched2_res_pick(const struct scheduler *ops, struct sched_unit *unit)
      * just grab the prv lock.  Instead, we'll have to trylock, and
      * do something else reasonable if we fail.
      */
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     if ( !read_trylock(&prv->lock) )
     {
@@ -2570,7 +2570,7 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
      * on either side may be empty).
      */
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     st.lrqd = c2rqd(ops, cpu);
 
     update_runq_load(ops, st.lrqd, 0, now);
@@ -3476,7 +3476,7 @@ csched2_schedule(
     rqd = c2rqd(ops, cpu);
     BUG_ON(!cpumask_test_cpu(cpu, &rqd->active));
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     BUG_ON(!is_idle_vcpu(scurr->vcpu) && scurr->rqd != rqd);
 
@@ -3867,7 +3867,7 @@ csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 
     rqi = init_pdata(prv, pdata, cpu);
     /* Move the scheduler lock to the new runq lock. */
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->rqd[rqi].lock;
+    get_sched_res(cpu)->schedule_lock = &prv->rqd[rqi].lock;
 
     /* _Not_ pcpu_schedule_unlock(): schedule_lock may have changed! */
     spin_unlock(old_lock);
@@ -3881,6 +3881,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 {
     struct csched2_private *prv = csched2_priv(new_ops);
     struct csched2_unit *svc = vdata;
+    struct sched_resource *sd = get_sched_res(cpu);
     unsigned rqi;
 
     ASSERT(pdata && svc && is_idle_vcpu(svc->vcpu));
@@ -3906,7 +3907,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * this scheduler, and so it's safe to have taken it /before/ our
      * private global lock.
      */
-    ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->rqd[rqi].lock);
+    ASSERT(sd->schedule_lock != &prv->rqd[rqi].lock);
 
     write_unlock(&prv->lock);
 
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index 4dc0356f80..5f0356c7f8 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -269,7 +269,7 @@ pick_res(struct null_private *prv, struct sched_unit *unit)
     unsigned int cpu = v->processor, new_cpu;
     cpumask_t *cpus = cpupool_domain_cpumask(v->domain);
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     for_each_affinity_balance_step( bs )
     {
@@ -419,7 +419,7 @@ static spinlock_t *null_switch_sched(struct scheduler *new_ops,
                                      unsigned int cpu,
                                      void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     struct null_private *prv = null_priv(new_ops);
     struct null_unit *nvc = vdata;
 
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 06f63176ee..3ce85122cc 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -75,7 +75,7 @@
 /*
  * Locking:
  * A global system lock is used to protect the RunQ and DepletedQ.
- * The global lock is referenced by schedule_data.schedule_lock
+ * The global lock is referenced by sched_res->schedule_lock
  * from all physical cpus.
  *
  * The lock is already grabbed when calling wake/sleep/schedule/ functions
@@ -176,7 +176,7 @@ static void repl_timer_handler(void *data);
 
 /*
  * System-wide private data, include global RunQueue/DepletedQ
- * Global lock is referenced by schedule_data.schedule_lock from all
+ * Global lock is referenced by sched_res->schedule_lock from all
  * physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
  */
 struct rt_private {
@@ -722,7 +722,7 @@ rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
     }
 
     /* Move the scheduler lock to our global runqueue lock.  */
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->lock;
+    get_sched_res(cpu)->schedule_lock = &prv->lock;
 
     /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
     spin_unlock_irqrestore(old_lock, flags);
@@ -735,6 +735,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 {
     struct rt_private *prv = rt_priv(new_ops);
     struct rt_unit *svc = vdata;
+    struct sched_resource *sd = get_sched_res(cpu);
 
     ASSERT(!pdata && svc && is_idle_vcpu(svc->vcpu));
 
@@ -744,7 +745,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * another scheduler, but that is how things need to be, for
      * preventing races.
      */
-    ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->lock);
+    ASSERT(sd->schedule_lock != &prv->lock);
 
     /*
      * If we are the absolute first cpu being switched toward this
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 45394e6c2e..c167eb23f2 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -65,7 +65,6 @@ static void vcpu_singleshot_timer_fn(void *data);
 static void poll_timer_fn(void *data);
 
 /* This is global for now so that private implementations can reach it */
-DEFINE_PER_CPU(struct schedule_data, schedule_data);
 DEFINE_PER_CPU(struct scheduler *, scheduler);
 DEFINE_PER_CPU_READ_MOSTLY(struct sched_resource *, sched_res);
 
@@ -213,7 +212,7 @@ static inline void vcpu_urgent_count_update(struct vcpu *v)
              !test_bit(v->vcpu_id, v->domain->poll_mask) )
         {
             v->is_urgent = 0;
-            atomic_dec(&per_cpu(schedule_data,v->processor).urgent_count);
+            atomic_dec(&get_sched_res(v->processor)->urgent_count);
         }
     }
     else
@@ -222,7 +221,7 @@ static inline void vcpu_urgent_count_update(struct vcpu *v)
              unlikely(test_bit(v->vcpu_id, v->domain->poll_mask)) )
         {
             v->is_urgent = 1;
-            atomic_inc(&per_cpu(schedule_data,v->processor).urgent_count);
+            atomic_inc(&get_sched_res(v->processor)->urgent_count);
         }
     }
 }
@@ -233,7 +232,7 @@ static inline void vcpu_runstate_change(
     s_time_t delta;
 
     ASSERT(v->runstate.state != new_state);
-    ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(v->processor)->schedule_lock));
 
     vcpu_urgent_count_update(v);
 
@@ -392,7 +391,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     /* Idle VCPUs are scheduled immediately, so don't put them in runqueue. */
     if ( is_idle_domain(d) )
     {
-        per_cpu(schedule_data, v->processor).curr = unit;
+        get_sched_res(v->processor)->curr = unit;
         v->is_running = 1;
     }
     else
@@ -517,7 +516,7 @@ void sched_destroy_vcpu(struct vcpu *v)
     kill_timer(&v->singleshot_timer);
     kill_timer(&v->poll_timer);
     if ( test_and_clear_bool(v->is_urgent) )
-        atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
+        atomic_dec(&get_sched_res(v->processor)->urgent_count);
     sched_remove_unit(vcpu_scheduler(v), unit);
     sched_free_vdata(vcpu_scheduler(v), unit->priv);
     sched_free_unit(unit);
@@ -564,7 +563,7 @@ void sched_destroy_domain(struct domain *d)
 
 void vcpu_sleep_nosync_locked(struct vcpu *v)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(v->processor)->schedule_lock));
 
     if ( likely(!vcpu_runnable(v)) )
     {
@@ -659,8 +658,8 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
      */
     if ( unlikely(v->is_urgent) && (old_cpu != new_cpu) )
     {
-        atomic_inc(&per_cpu(schedule_data, new_cpu).urgent_count);
-        atomic_dec(&per_cpu(schedule_data, old_cpu).urgent_count);
+        atomic_inc(&get_sched_res(new_cpu)->urgent_count);
+        atomic_dec(&get_sched_res(old_cpu)->urgent_count);
     }
 
     /*
@@ -726,20 +725,20 @@ static void vcpu_migrate_finish(struct vcpu *v)
          * are not correct any longer after evaluating old and new cpu holding
          * the locks.
          */
-        old_lock = per_cpu(schedule_data, old_cpu).schedule_lock;
-        new_lock = per_cpu(schedule_data, new_cpu).schedule_lock;
+        old_lock = get_sched_res(old_cpu)->schedule_lock;
+        new_lock = get_sched_res(new_cpu)->schedule_lock;
 
         sched_spin_lock_double(old_lock, new_lock, &flags);
 
         old_cpu = v->processor;
-        if ( old_lock == per_cpu(schedule_data, old_cpu).schedule_lock )
+        if ( old_lock == get_sched_res(old_cpu)->schedule_lock )
         {
             /*
              * If we selected a CPU on the previosu iteration, check if it
              * remains suitable for running this vCPU.
              */
             if ( pick_called &&
-                 (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
+                 (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->cpu_hard_affinity) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
@@ -747,7 +746,7 @@ static void vcpu_migrate_finish(struct vcpu *v)
             /* Select a new CPU. */
             new_cpu = sched_pick_resource(vcpu_scheduler(v),
                                           v->sched_unit)->processor;
-            if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
+            if ( (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
             pick_called = 1;
@@ -1558,7 +1557,7 @@ static void schedule(void)
     struct scheduler     *sched;
     unsigned long        *tasklet_work = &this_cpu(tasklet_work_to_do);
     bool_t                tasklet_work_scheduled = 0;
-    struct schedule_data *sd;
+    struct sched_resource *sd;
     spinlock_t           *lock;
     struct task_slice     next_slice;
     int cpu = smp_processor_id();
@@ -1567,7 +1566,7 @@ static void schedule(void)
 
     SCHED_STAT_CRANK(sched_run);
 
-    sd = &this_cpu(schedule_data);
+    sd = get_sched_res(cpu);
 
     /* Update tasklet scheduling status. */
     switch ( *tasklet_work )
@@ -1708,14 +1707,13 @@ static void poll_timer_fn(void *data)
 
 static int cpu_schedule_up(unsigned int cpu)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
-    struct sched_resource *res;
+    struct sched_resource *sd;
 
-    res = xzalloc(struct sched_resource);
-    if ( res == NULL )
+    sd = xzalloc(struct sched_resource);
+    if ( sd == NULL )
         return -ENOMEM;
-    res->processor = cpu;
-    set_sched_res(cpu, res);
+    sd->processor = cpu;
+    set_sched_res(cpu, sd);
 
     per_cpu(scheduler, cpu) = &sched_idle_ops;
     spin_lock_init(&sd->_lock);
@@ -1730,7 +1728,7 @@ static int cpu_schedule_up(unsigned int cpu)
     if ( idle_vcpu[cpu] == NULL )
         vcpu_create(idle_vcpu[0]->domain, cpu, cpu);
     else
-        idle_vcpu[cpu]->sched_unit->res = res;
+        idle_vcpu[cpu]->sched_unit->res = sd;
 
     if ( idle_vcpu[cpu] == NULL )
         return -ENOMEM;
@@ -1750,7 +1748,7 @@ static int cpu_schedule_up(unsigned int cpu)
 
 static void cpu_schedule_down(unsigned int cpu)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
 
     kill_timer(&sd->s_timer);
 
@@ -1909,7 +1907,7 @@ void __init scheduler_init(void)
     idle_domain->max_vcpus = nr_cpu_ids;
     if ( vcpu_create(idle_domain, 0, 0) == NULL )
         BUG();
-    this_cpu(schedule_data).curr = idle_vcpu[0]->sched_unit;
+    get_sched_res(0)->curr = idle_vcpu[0]->sched_unit;
 }
 
 /*
@@ -1926,7 +1924,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     struct scheduler *old_ops = per_cpu(scheduler, cpu);
     struct scheduler *new_ops = (c == NULL) ? &sched_idle_ops : c->sched;
     struct cpupool *old_pool = per_cpu(cpupool, cpu);
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     spinlock_t *old_lock, *new_lock;
     unsigned long flags;
 
@@ -2109,6 +2107,16 @@ void wait(void)
     schedule();
 }
 
+/*
+ * vcpu is urgent if vcpu is polling event channel
+ *
+ * if urgent vcpu exists, CPU should not enter deep C state
+ */
+bool sched_has_urgent_vcpu(void)
+{
+    return atomic_read(&get_sched_res(smp_processor_id())->urgent_count);
+}
+
 #ifdef CONFIG_COMPAT
 #include "compat/schedule.c"
 #endif
diff --git a/xen/include/asm-x86/cpuidle.h b/xen/include/asm-x86/cpuidle.h
index 488f708305..5d7dffd228 100644
--- a/xen/include/asm-x86/cpuidle.h
+++ b/xen/include/asm-x86/cpuidle.h
@@ -4,7 +4,6 @@
 #include <xen/cpuidle.h>
 #include <xen/notifier.h>
 #include <xen/sched.h>
-#include <xen/sched-if.h>
 
 extern struct acpi_processor_power *processor_powers[];
 
@@ -27,14 +26,4 @@ void update_idle_stats(struct acpi_processor_power *,
 void update_last_cx_stat(struct acpi_processor_power *,
                          struct acpi_processor_cx *, uint64_t);
 
-/*
- * vcpu is urgent if vcpu is polling event channel
- *
- * if urgent vcpu exists, CPU should not enter deep C state
- */
-static inline int sched_has_urgent_vcpu(void)
-{
-    return atomic_read(&this_cpu(schedule_data).urgent_count);
-}
-
 #endif /* __X86_ASM_CPUIDLE_H__ */
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 69aedd2210..212c612374 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -33,22 +33,18 @@ extern int sched_ratelimit_us;
  * For cache betterness, keep the actual lock in the same cache area
  * as the rest of the struct.  Just have the scheduler point to the
  * one it wants (This may be the one right in front of it).*/
-struct schedule_data {
+struct sched_resource {
     spinlock_t         *schedule_lock,
                        _lock;
     struct sched_unit  *curr;           /* current task                    */
     void               *sched_priv;
     struct timer        s_timer;        /* scheduling timer                */
     atomic_t            urgent_count;   /* how many urgent vcpus           */
+    unsigned int        processor;
 };
 
-#define curr_on_cpu(c)    (per_cpu(schedule_data, c).curr)
-
-struct sched_resource {
-    unsigned int processor;
-};
+#define curr_on_cpu(c)    (get_sched_res(c)->curr)
 
-DECLARE_PER_CPU(struct schedule_data, schedule_data);
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
 DECLARE_PER_CPU(struct sched_resource *, sched_res);
@@ -79,7 +75,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
 { \
     for ( ; ; ) \
     { \
-        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
+        spinlock_t *lock = get_sched_res(cpu)->schedule_lock; \
         /* \
          * v->processor may change when grabbing the lock; but \
          * per_cpu(v->processor) may also change, if changing cpu pool \
@@ -89,7 +85,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
          * lock may be the same; this will succeed in that case. \
          */ \
         spin_lock##irq(lock, ## arg); \
-        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
+        if ( likely(lock == get_sched_res(cpu)->schedule_lock) ) \
             return lock; \
         spin_unlock##irq(lock, ## arg); \
     } \
@@ -99,7 +95,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
 static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
                                                EXTRA_TYPE(arg), param) \
 { \
-    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
+    ASSERT(lock == get_sched_res(cpu)->schedule_lock); \
     spin_unlock##irq(lock, ## arg); \
 }
 
@@ -128,11 +124,11 @@ sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
 
 static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
 {
-    spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;
+    spinlock_t *lock = get_sched_res(cpu)->schedule_lock;
 
     if ( !spin_trylock(lock) )
         return NULL;
-    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+    if ( lock == get_sched_res(cpu)->schedule_lock )
         return lock;
     spin_unlock(lock);
     return NULL;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 2ab1b13c63..575ce9a245 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -903,6 +903,7 @@ void restore_vcpu_affinity(struct domain *d);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);
+bool sched_has_urgent_vcpu(void);
 
 /*
  * Used by idle loop to decide whether there is work to do:
-- 
2.16.4


