From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: "Juergen Gross" <jgross@suse.com>, "Tim Deegan" <tim@xen.org>,
	"Stefano Stabellini" <sstabellini@kernel.org>,
	"Wei Liu" <wl@xen.org>,
	"Konrad Rzeszutek Wilk" <konrad.wilk@oracle.com>,
	"George Dunlap" <george.dunlap@eu.citrix.com>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Ian Jackson" <ian.jackson@eu.citrix.com>,
	"Robert VanVossen" <robert.vanvossen@dornerworks.com>,
	"Dario Faggioli" <dfaggioli@suse.com>,
	"Julien Grall" <julien.grall@arm.com>,
	"Josh Whitehead" <josh.whitehead@dornerworks.com>,
	"Meng Xu" <mengxu@cis.upenn.edu>,
	"Jan Beulich" <jbeulich@suse.com>,
	"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [Xen-devel] [PATCH v3 07/47] xen/sched: move per cpu scheduler private data into struct sched_resource
Date: Sat, 14 Sep 2019 10:52:11 +0200
Message-ID: <20190914085251.18816-8-jgross@suse.com>
In-Reply-To: <20190914085251.18816-1-jgross@suse.com>

This prepares support for larger scheduling granularities, e.g. core
scheduling.

While at it, move sched_has_urgent_vcpu() from include/asm-x86/cpuidle.h
into sched.h, removing the need to include sched-if.h in cpuidle.h.
For that purpose, remove urgent_count from the scheduler private data
and make it a plain percpu variable.
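
As an illustrative sketch only (not part of the patch itself, using only
names touched by it), the per-cpu access pattern changes roughly as
follows, with the urgent-vcpu accounting becoming a plain percpu counter:

    /* Before: scheduler state reached via the schedule_data percpu variable. */
    spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;
    atomic_inc(&per_cpu(schedule_data, cpu).urgent_count);

    /* After: the same state lives in struct sched_resource resp. a percpu counter. */
    spinlock_t *lock = get_sched_res(cpu)->schedule_lock;
    atomic_inc(&per_cpu(sched_urgent_count, cpu));

    /* sched_has_urgent_vcpu() then only needs sched.h: */
    static inline bool sched_has_urgent_vcpu(void)
    {
        return atomic_read(&this_cpu(sched_urgent_count));
    }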

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V1:
- move sched_has_urgent_vcpu()
V2:
- make sched_has_urgent_vcpu() return bool (Jan Beulich)
V3:
- split out removing sched-if.h include in some C files (Jan Beulich)
- make urgent_count a plain percpu variable (Jan Beulich)
---
 xen/common/sched_arinc653.c   |  4 ++--
 xen/common/sched_credit.c     | 10 ++++----
 xen/common/sched_credit2.c    | 21 +++++++++--------
 xen/common/sched_null.c       |  4 ++--
 xen/common/sched_rt.c         |  9 +++----
 xen/common/schedule.c         | 55 ++++++++++++++++++++++---------------------
 xen/include/asm-x86/cpuidle.h | 11 ---------
 xen/include/xen/sched-if.h    | 23 ++++++++----------
 xen/include/xen/sched.h       | 11 +++++++++
 9 files changed, 74 insertions(+), 74 deletions(-)

diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index 8585d9c4fe..98cdd7f894 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -475,7 +475,7 @@ a653sched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
      * If the VCPU being put to sleep is the same one that is currently
      * running, raise a softirq to invoke the scheduler to switch domains.
      */
-    if ( per_cpu(schedule_data, vc->processor).curr == unit )
+    if ( get_sched_res(vc->processor)->curr == unit )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
 }
 
@@ -643,7 +643,7 @@ static spinlock_t *
 a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                   void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     arinc653_vcpu_t *svc = vdata;
 
     ASSERT(!pdata && svc && is_idle_vcpu(svc->vc));
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index b7d44efc8b..ee42de6bce 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -82,7 +82,7 @@
 #define CSCHED_PRIV(_ops)   \
     ((struct csched_private *)((_ops)->sched_data))
 #define CSCHED_PCPU(_c)     \
-    ((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
+    ((struct csched_pcpu *)get_sched_res(_c)->sched_priv)
 #define CSCHED_UNIT(unit)   ((struct csched_unit *) (unit)->priv)
 #define CSCHED_DOM(_dom)    ((struct csched_dom *) (_dom)->sched_priv)
 #define RUNQ(_cpu)          (&(CSCHED_PCPU(_cpu)->runq))
@@ -250,7 +250,7 @@ static inline bool_t is_runq_idle(unsigned int cpu)
     /*
      * We're peeking at cpu's runq, we must hold the proper lock.
      */
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     return list_empty(RUNQ(cpu)) ||
            is_idle_vcpu(__runq_elem(RUNQ(cpu)->next)->vcpu);
@@ -259,7 +259,7 @@ static inline bool_t is_runq_idle(unsigned int cpu)
 static inline void
 inc_nr_runnable(unsigned int cpu)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     CSCHED_PCPU(cpu)->nr_runnable++;
 
 }
@@ -267,7 +267,7 @@ inc_nr_runnable(unsigned int cpu)
 static inline void
 dec_nr_runnable(unsigned int cpu)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     ASSERT(CSCHED_PCPU(cpu)->nr_runnable >= 1);
     CSCHED_PCPU(cpu)->nr_runnable--;
 }
@@ -628,7 +628,7 @@ static spinlock_t *
 csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                     void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     struct csched_private *prv = CSCHED_PRIV(new_ops);
     struct csched_unit *svc = vdata;
 
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 4982f1ef36..849f3a916e 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -568,7 +568,7 @@ static inline struct csched2_private *csched2_priv(const struct scheduler *ops)
 
 static inline struct csched2_pcpu *csched2_pcpu(unsigned int cpu)
 {
-    return per_cpu(schedule_data, cpu).sched_priv;
+    return get_sched_res(cpu)->sched_priv;
 }
 
 static inline struct csched2_unit *csched2_unit(const struct sched_unit *unit)
@@ -1277,7 +1277,7 @@ runq_insert(const struct scheduler *ops, struct csched2_unit *svc)
     struct list_head * runq = &c2rqd(ops, cpu)->runq;
     int pos = 0;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     ASSERT(!vcpu_on_runq(svc));
     ASSERT(c2r(cpu) == c2r(svc->vcpu->processor));
@@ -1798,7 +1798,7 @@ static bool vcpu_grab_budget(struct csched2_unit *svc)
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     if ( svc->budget > 0 )
         return true;
@@ -1845,7 +1845,7 @@ vcpu_return_budget(struct csched2_unit *svc, struct list_head *parked)
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     ASSERT(list_empty(parked));
 
     /* budget_lock nests inside runqueue lock. */
@@ -2102,7 +2102,7 @@ csched2_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
     unsigned int cpu = vc->processor;
     s_time_t now;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     ASSERT(!is_idle_vcpu(vc));
 
@@ -2230,7 +2230,7 @@ csched2_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
      * just grab the prv lock.  Instead, we'll have to trylock, and
      * do something else reasonable if we fail.
      */
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     if ( !read_trylock(&prv->lock) )
     {
@@ -2570,7 +2570,7 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
      * on either side may be empty).
      */
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     st.lrqd = c2rqd(ops, cpu);
 
     update_runq_load(ops, st.lrqd, 0, now);
@@ -3476,7 +3476,7 @@ csched2_schedule(
     rqd = c2rqd(ops, cpu);
     BUG_ON(!cpumask_test_cpu(cpu, &rqd->active));
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     BUG_ON(!is_idle_vcpu(scurr->vcpu) && scurr->rqd != rqd);
 
@@ -3867,7 +3867,7 @@ csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 
     rqi = init_pdata(prv, pdata, cpu);
     /* Move the scheduler lock to the new runq lock. */
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->rqd[rqi].lock;
+    get_sched_res(cpu)->schedule_lock = &prv->rqd[rqi].lock;
 
     /* _Not_ pcpu_schedule_unlock(): schedule_lock may have changed! */
     spin_unlock(old_lock);
@@ -3881,6 +3881,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 {
     struct csched2_private *prv = csched2_priv(new_ops);
     struct csched2_unit *svc = vdata;
+    struct sched_resource *sd = get_sched_res(cpu);
     unsigned rqi;
 
     ASSERT(pdata && svc && is_idle_vcpu(svc->vcpu));
@@ -3906,7 +3907,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * this scheduler, and so it's safe to have taken it /before/ our
      * private global lock.
      */
-    ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->rqd[rqi].lock);
+    ASSERT(sd->schedule_lock != &prv->rqd[rqi].lock);
 
     write_unlock(&prv->lock);
 
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index d7d0e77cb5..e7ba55c650 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -269,7 +269,7 @@ pick_res(struct null_private *prv, const struct sched_unit *unit)
     unsigned int cpu = v->processor, new_cpu;
     cpumask_t *cpus = cpupool_domain_cpumask(v->domain);
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     for_each_affinity_balance_step( bs )
     {
@@ -419,7 +419,7 @@ static spinlock_t *null_switch_sched(struct scheduler *new_ops,
                                      unsigned int cpu,
                                      void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     struct null_private *prv = null_priv(new_ops);
     struct null_unit *nvc = vdata;
 
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 115e4da24d..f31c7dc02c 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -75,7 +75,7 @@
 /*
  * Locking:
  * A global system lock is used to protect the RunQ and DepletedQ.
- * The global lock is referenced by schedule_data.schedule_lock
+ * The global lock is referenced by sched_res->schedule_lock
  * from all physical cpus.
  *
  * The lock is already grabbed when calling wake/sleep/schedule/ functions
@@ -176,7 +176,7 @@ static void repl_timer_handler(void *data);
 
 /*
  * System-wide private data, include global RunQueue/DepletedQ
- * Global lock is referenced by schedule_data.schedule_lock from all
+ * Global lock is referenced by sched_res->schedule_lock from all
  * physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
  */
 struct rt_private {
@@ -722,7 +722,7 @@ rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
     }
 
     /* Move the scheduler lock to our global runqueue lock.  */
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->lock;
+    get_sched_res(cpu)->schedule_lock = &prv->lock;
 
     /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
     spin_unlock_irqrestore(old_lock, flags);
@@ -735,6 +735,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 {
     struct rt_private *prv = rt_priv(new_ops);
     struct rt_unit *svc = vdata;
+    struct sched_resource *sd = get_sched_res(cpu);
 
     ASSERT(!pdata && svc && is_idle_vcpu(svc->vcpu));
 
@@ -744,7 +745,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * another scheduler, but that is how things need to be, for
      * preventing races.
      */
-    ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->lock);
+    ASSERT(sd->schedule_lock != &prv->lock);
 
     /*
      * If we are the absolute first cpu being switched toward this
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 865c50b378..42bca8df54 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -65,13 +65,15 @@ static void vcpu_singleshot_timer_fn(void *data);
 static void poll_timer_fn(void *data);
 
 /* This is global for now so that private implementations can reach it */
-DEFINE_PER_CPU(struct schedule_data, schedule_data);
 DEFINE_PER_CPU(struct scheduler *, scheduler);
 DEFINE_PER_CPU_READ_MOSTLY(struct sched_resource *, sched_res);
 
 /* Scratch space for cpumasks. */
 DEFINE_PER_CPU(cpumask_t, cpumask_scratch);
 
+/* How many urgent vcpus. */
+DEFINE_PER_CPU(atomic_t, sched_urgent_count);
+
 extern const struct scheduler *__start_schedulers_array[], *__end_schedulers_array[];
 #define NUM_SCHEDULERS (__end_schedulers_array - __start_schedulers_array)
 #define schedulers __start_schedulers_array
@@ -213,7 +215,7 @@ static inline void vcpu_urgent_count_update(struct vcpu *v)
              !test_bit(v->vcpu_id, v->domain->poll_mask) )
         {
             v->is_urgent = 0;
-            atomic_dec(&per_cpu(schedule_data,v->processor).urgent_count);
+            atomic_dec(&per_cpu(sched_urgent_count, v->processor));
         }
     }
     else
@@ -222,7 +224,7 @@ static inline void vcpu_urgent_count_update(struct vcpu *v)
              unlikely(test_bit(v->vcpu_id, v->domain->poll_mask)) )
         {
             v->is_urgent = 1;
-            atomic_inc(&per_cpu(schedule_data,v->processor).urgent_count);
+            atomic_inc(&per_cpu(sched_urgent_count, v->processor));
         }
     }
 }
@@ -233,7 +235,7 @@ static inline void vcpu_runstate_change(
     s_time_t delta;
 
     ASSERT(v->runstate.state != new_state);
-    ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(v->processor)->schedule_lock));
 
     vcpu_urgent_count_update(v);
 
@@ -393,7 +395,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     /* Idle VCPUs are scheduled immediately, so don't put them in runqueue. */
     if ( is_idle_domain(d) )
     {
-        per_cpu(schedule_data, v->processor).curr = unit;
+        get_sched_res(v->processor)->curr = unit;
         v->is_running = 1;
     }
     else
@@ -518,7 +520,7 @@ void sched_destroy_vcpu(struct vcpu *v)
     kill_timer(&v->singleshot_timer);
     kill_timer(&v->poll_timer);
     if ( test_and_clear_bool(v->is_urgent) )
-        atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
+        atomic_dec(&per_cpu(sched_urgent_count, v->processor));
     sched_remove_unit(vcpu_scheduler(v), unit);
     sched_free_vdata(vcpu_scheduler(v), unit->priv);
     sched_free_unit(unit);
@@ -565,7 +567,7 @@ void sched_destroy_domain(struct domain *d)
 
 void vcpu_sleep_nosync_locked(struct vcpu *v)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(v->processor)->schedule_lock));
 
     if ( likely(!vcpu_runnable(v)) )
     {
@@ -660,8 +662,8 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
      */
     if ( unlikely(v->is_urgent) && (old_cpu != new_cpu) )
     {
-        atomic_inc(&per_cpu(schedule_data, new_cpu).urgent_count);
-        atomic_dec(&per_cpu(schedule_data, old_cpu).urgent_count);
+        atomic_inc(&per_cpu(sched_urgent_count, new_cpu));
+        atomic_dec(&per_cpu(sched_urgent_count, old_cpu));
     }
 
     /*
@@ -727,20 +729,20 @@ static void vcpu_migrate_finish(struct vcpu *v)
          * are not correct any longer after evaluating old and new cpu holding
          * the locks.
          */
-        old_lock = per_cpu(schedule_data, old_cpu).schedule_lock;
-        new_lock = per_cpu(schedule_data, new_cpu).schedule_lock;
+        old_lock = get_sched_res(old_cpu)->schedule_lock;
+        new_lock = get_sched_res(new_cpu)->schedule_lock;
 
         sched_spin_lock_double(old_lock, new_lock, &flags);
 
         old_cpu = v->processor;
-        if ( old_lock == per_cpu(schedule_data, old_cpu).schedule_lock )
+        if ( old_lock == get_sched_res(old_cpu)->schedule_lock )
         {
             /*
             * If we selected a CPU on the previous iteration, check if it
              * remains suitable for running this vCPU.
              */
             if ( pick_called &&
-                 (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
+                 (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->cpu_hard_affinity) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
@@ -748,7 +750,7 @@ static void vcpu_migrate_finish(struct vcpu *v)
             /* Select a new CPU. */
             new_cpu = sched_pick_resource(vcpu_scheduler(v),
                                           v->sched_unit)->master_cpu;
-            if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
+            if ( (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
             pick_called = 1;
@@ -1565,7 +1567,7 @@ static void schedule(void)
     struct scheduler     *sched;
     unsigned long        *tasklet_work = &this_cpu(tasklet_work_to_do);
     bool_t                tasklet_work_scheduled = 0;
-    struct schedule_data *sd;
+    struct sched_resource *sd;
     spinlock_t           *lock;
     struct task_slice     next_slice;
     int cpu = smp_processor_id();
@@ -1574,7 +1576,7 @@ static void schedule(void)
 
     SCHED_STAT_CRANK(sched_run);
 
-    sd = &this_cpu(schedule_data);
+    sd = get_sched_res(cpu);
 
     /* Update tasklet scheduling status. */
     switch ( *tasklet_work )
@@ -1715,20 +1717,19 @@ static void poll_timer_fn(void *data)
 
 static int cpu_schedule_up(unsigned int cpu)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
-    struct sched_resource *res;
+    struct sched_resource *sd;
 
-    res = xzalloc(struct sched_resource);
-    if ( res == NULL )
+    sd = xzalloc(struct sched_resource);
+    if ( sd == NULL )
         return -ENOMEM;
-    res->master_cpu = cpu;
-    set_sched_res(cpu, res);
+    sd->master_cpu = cpu;
+    set_sched_res(cpu, sd);
 
     per_cpu(scheduler, cpu) = &sched_idle_ops;
     spin_lock_init(&sd->_lock);
     sd->schedule_lock = &sched_free_cpu_lock;
     init_timer(&sd->s_timer, s_timer_fn, NULL, cpu);
-    atomic_set(&sd->urgent_count, 0);
+    atomic_set(&per_cpu(sched_urgent_count, cpu), 0);
 
     /* Boot CPU is dealt with later in scheduler_init(). */
     if ( cpu == 0 )
@@ -1737,7 +1738,7 @@ static int cpu_schedule_up(unsigned int cpu)
     if ( idle_vcpu[cpu] == NULL )
         vcpu_create(idle_vcpu[0]->domain, cpu, cpu);
     else
-        idle_vcpu[cpu]->sched_unit->res = res;
+        idle_vcpu[cpu]->sched_unit->res = sd;
 
     if ( idle_vcpu[cpu] == NULL )
         return -ENOMEM;
@@ -1757,7 +1758,7 @@ static int cpu_schedule_up(unsigned int cpu)
 
 static void cpu_schedule_down(unsigned int cpu)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
 
     kill_timer(&sd->s_timer);
 
@@ -1916,7 +1917,7 @@ void __init scheduler_init(void)
     idle_domain->max_vcpus = nr_cpu_ids;
     if ( vcpu_create(idle_domain, 0, 0) == NULL )
         BUG();
-    this_cpu(schedule_data).curr = idle_vcpu[0]->sched_unit;
+    get_sched_res(0)->curr = idle_vcpu[0]->sched_unit;
 }
 
 /*
@@ -1933,7 +1934,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     struct scheduler *old_ops = per_cpu(scheduler, cpu);
     struct scheduler *new_ops = (c == NULL) ? &sched_idle_ops : c->sched;
     struct cpupool *old_pool = per_cpu(cpupool, cpu);
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     spinlock_t *old_lock, *new_lock;
     unsigned long flags;
 
diff --git a/xen/include/asm-x86/cpuidle.h b/xen/include/asm-x86/cpuidle.h
index 488f708305..5d7dffd228 100644
--- a/xen/include/asm-x86/cpuidle.h
+++ b/xen/include/asm-x86/cpuidle.h
@@ -4,7 +4,6 @@
 #include <xen/cpuidle.h>
 #include <xen/notifier.h>
 #include <xen/sched.h>
-#include <xen/sched-if.h>
 
 extern struct acpi_processor_power *processor_powers[];
 
@@ -27,14 +26,4 @@ void update_idle_stats(struct acpi_processor_power *,
 void update_last_cx_stat(struct acpi_processor_power *,
                          struct acpi_processor_cx *, uint64_t);
 
-/*
- * vcpu is urgent if vcpu is polling event channel
- *
- * if urgent vcpu exists, CPU should not enter deep C state
- */
-static inline int sched_has_urgent_vcpu(void)
-{
-    return atomic_read(&this_cpu(schedule_data).urgent_count);
-}
-
 #endif /* __X86_ASM_CPUIDLE_H__ */
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 6aa8d125c8..2b152d1c75 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -33,22 +33,19 @@ extern int sched_ratelimit_us;
  * For cache betterness, keep the actual lock in the same cache area
  * as the rest of the struct.  Just have the scheduler point to the
  * one it wants (This may be the one right in front of it).*/
-struct schedule_data {
+struct sched_resource {
     spinlock_t         *schedule_lock,
                        _lock;
     struct sched_unit  *curr;
     void               *sched_priv;
     struct timer        s_timer;        /* scheduling timer                */
-    atomic_t            urgent_count;   /* how many urgent vcpus           */
-};
-
-#define curr_on_cpu(c)    (per_cpu(schedule_data, c).curr)
 
-struct sched_resource {
-    unsigned int master_cpu;  /* Cpu with lowest id in scheduling resource. */
+    /* Cpu with lowest id in scheduling resource. */
+    unsigned int        master_cpu;
 };
 
-DECLARE_PER_CPU(struct schedule_data, schedule_data);
+#define curr_on_cpu(c)    (get_sched_res(c)->curr)
+
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
 DECLARE_PER_CPU(struct sched_resource *, sched_res);
@@ -79,7 +76,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
 { \
     for ( ; ; ) \
     { \
-        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
+        spinlock_t *lock = get_sched_res(cpu)->schedule_lock; \
         /* \
          * v->processor may change when grabbing the lock; but \
          * per_cpu(v->processor) may also change, if changing cpu pool \
@@ -89,7 +86,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
          * lock may be the same; this will succeed in that case. \
          */ \
         spin_lock##irq(lock, ## arg); \
-        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
+        if ( likely(lock == get_sched_res(cpu)->schedule_lock) ) \
             return lock; \
         spin_unlock##irq(lock, ## arg); \
     } \
@@ -99,7 +96,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
 static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
                                                EXTRA_TYPE(arg), param) \
 { \
-    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
+    ASSERT(lock == get_sched_res(cpu)->schedule_lock); \
     spin_unlock##irq(lock, ## arg); \
 }
 
@@ -128,11 +125,11 @@ sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
 
 static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
 {
-    spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;
+    spinlock_t *lock = get_sched_res(cpu)->schedule_lock;
 
     if ( !spin_trylock(lock) )
         return NULL;
-    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+    if ( lock == get_sched_res(cpu)->schedule_lock )
         return lock;
     spin_unlock(lock);
     return NULL;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 999e43e8cc..8780888603 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -881,6 +881,17 @@ static inline struct vcpu *domain_vcpu(const struct domain *d,
 
 void cpu_init(void);
 
+/*
+ * vcpu is urgent if vcpu is polling event channel
+ *
+ * if urgent vcpu exists, CPU should not enter deep C state
+ */
+DECLARE_PER_CPU(atomic_t, sched_urgent_count);
+static inline bool sched_has_urgent_vcpu(void)
+{
+    return atomic_read(&this_cpu(sched_urgent_count));
+}
+
 struct scheduler;
 
 struct scheduler *scheduler_get_default(void);
-- 
2.16.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
