From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>, Tim Deegan <tim@xen.org>,
	Stefano Stabellini <sstabellini@kernel.org>, Wei Liu <wl@xen.org>,
	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
	George Dunlap <george.dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	Robert VanVossen <robert.vanvossen@dornerworks.com>,
	Dario Faggioli <dfaggioli@suse.com>,
	Julien Grall <julien.grall@arm.com>,
	Josh Whitehead <josh.whitehead@dornerworks.com>,
	Meng Xu <mengxu@cis.upenn.edu>, Jan Beulich <jbeulich@suse.com>
Subject: [Xen-devel] [PATCH v4 05/46] xen/sched: let pick_cpu return a scheduler resource
Date: Fri, 27 Sep 2019 09:00:09 +0200	[thread overview]
Message-ID: <20190927070050.12405-6-jgross@suse.com> (raw)
In-Reply-To: <20190927070050.12405-1-jgross@suse.com>

Instead of returning a physical cpu number, let pick_cpu() return a
scheduler resource. Rename pick_cpu() to pick_resource() to reflect
that change.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Dario Faggioli <dfaggioli@suse.com>
---
V3:
- style fix (Jan Beulich)
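
Note for readers skimming the diff below: the following is a minimal
standalone sketch of the shape of the interface change, with simplified
stand-in types and a hypothetical example_pick_resource() policy. It is
not the actual Xen code; it only illustrates that the pick callback now
returns a scheduler resource rather than a raw cpu number, and that
callers derive the cpu from res->master_cpu, as the hunks below do.

/* Minimal standalone illustration; stand-in types, not the real Xen ones. */
#include <stdio.h>

#define NR_CPUS 4

struct sched_resource {
    unsigned int master_cpu;        /* cpu identifying this resource */
};

struct sched_unit {
    struct sched_resource *res;     /* resource the unit is assigned to */
    unsigned int processor;         /* stand-in for vc->processor */
};

static struct sched_resource resources[NR_CPUS];

/* Simplified stand-in for get_sched_res(cpu). */
static struct sched_resource *get_sched_res(unsigned int cpu)
{
    return &resources[cpu];
}

/* New-style callback: return a resource instead of a cpu number. */
static struct sched_resource *
example_pick_resource(const struct sched_unit *unit)
{
    unsigned int cpu = unit->processor;   /* trivial "policy" for the sketch */

    return get_sched_res(cpu);
}

int main(void)
{
    struct sched_unit unit = { .res = NULL, .processor = 2 };
    unsigned int i;

    for ( i = 0; i < NR_CPUS; i++ )
        resources[i].master_cpu = i;

    /* Caller pattern used throughout the patch: pick res, then derive cpu. */
    unit.res = example_pick_resource(&unit);
    unit.processor = unit.res->master_cpu;

    printf("picked resource with master_cpu %u\n", unit.processor);

    return 0;
}
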
---
 xen/common/sched_arinc653.c  | 13 +++++++------
 xen/common/sched_credit.c    | 16 ++++++++--------
 xen/common/sched_credit2.c   | 22 +++++++++++-----------
 xen/common/sched_null.c      | 23 ++++++++++++-----------
 xen/common/sched_rt.c        | 18 +++++++++---------
 xen/common/schedule.c        | 18 ++++++++++--------
 xen/include/xen/perfc_defn.h |  2 +-
 xen/include/xen/sched-if.h   | 10 +++++-----
 8 files changed, 63 insertions(+), 59 deletions(-)

diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index 67009f235d..9faa1c48c4 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -607,15 +607,16 @@ a653sched_do_schedule(
 }
 
 /**
- * Xen scheduler callback function to select a CPU for the VCPU to run on
+ * Xen scheduler callback function to select a resource for the VCPU to run on
  *
  * @param ops       Pointer to this instance of the scheduler structure
  * @param unit      Pointer to struct sched_unit
  *
- * @return          Number of selected physical CPU
+ * @return          Scheduler resource to run on
  */
-static int
-a653sched_pick_cpu(const struct scheduler *ops, const struct sched_unit *unit)
+static struct sched_resource *
+a653sched_pick_resource(const struct scheduler *ops,
+                        const struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
     cpumask_t *online;
@@ -633,7 +634,7 @@ a653sched_pick_cpu(const struct scheduler *ops, const struct sched_unit *unit)
          || (cpu >= nr_cpu_ids) )
         cpu = vc->processor;
 
-    return cpu;
+    return get_sched_res(cpu);
 }
 
 /**
@@ -726,7 +727,7 @@ static const struct scheduler sched_arinc653_def = {
 
     .do_schedule    = a653sched_do_schedule,
 
-    .pick_cpu       = a653sched_pick_cpu,
+    .pick_resource  = a653sched_pick_resource,
 
     .switch_sched   = a653_switch_sched,
 
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 4b4d7021de..fa73081b3c 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -853,8 +853,8 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
     return cpu;
 }
 
-static int
-csched_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit)
+static struct sched_resource *
+csched_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
     struct csched_vcpu *svc = CSCHED_VCPU(vc);
@@ -867,7 +867,7 @@ csched_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit)
      * get boosted, which we don't deserve as we are "only" migrating.
      */
     set_bit(CSCHED_FLAG_VCPU_MIGRATING, &svc->flags);
-    return _csched_cpu_pick(ops, vc, 1);
+    return get_sched_res(_csched_cpu_pick(ops, vc, 1));
 }
 
 static inline void
@@ -967,7 +967,7 @@ csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
         /*
          * If it's been active a while, check if we'd be better off
          * migrating it to run elsewhere (see multi-core and multi-thread
-         * support in csched_cpu_pick()).
+         * support in csched_res_pick()).
          */
         new_cpu = _csched_cpu_pick(ops, current, 0);
 
@@ -1022,11 +1022,11 @@ csched_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    /* csched_cpu_pick() looks in vc->processor's runq, so we need the lock. */
+    /* csched_res_pick() looks in vc->processor's runq, so we need the lock. */
     lock = vcpu_schedule_lock_irq(vc);
 
-    vc->processor = csched_cpu_pick(ops, unit);
-    unit->res = get_sched_res(vc->processor);
+    unit->res = csched_res_pick(ops, unit);
+    vc->processor = unit->res->master_cpu;
 
     spin_unlock_irq(lock);
 
@@ -2278,7 +2278,7 @@ static const struct scheduler sched_credit_def = {
     .adjust_affinity= csched_aff_cntl,
     .adjust_global  = csched_sys_cntl,
 
-    .pick_cpu       = csched_cpu_pick,
+    .pick_resource  = csched_res_pick,
     .do_schedule    = csched_schedule,
 
     .dump_cpu_state = csched_dump_pcpu,
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 2981d642b0..37192e6713 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -626,9 +626,9 @@ static inline bool has_cap(const struct csched2_vcpu *svc)
  * runq, _always_ happens by means of tickling:
  *  - when a vcpu wakes up, it calls csched2_unit_wake(), which calls
  *    runq_tickle();
- *  - when a migration is initiated in schedule.c, we call csched2_cpu_pick(),
+ *  - when a migration is initiated in schedule.c, we call csched2_res_pick(),
  *    csched2_unit_migrate() (which calls migrate()) and csched2_unit_wake().
- *    csched2_cpu_pick() looks for the least loaded runq and return just any
+ *    csched2_res_pick() looks for the least loaded runq and returns just any
  *    of its processors. Then, csched2_unit_migrate() just moves the vcpu to
  *    the chosen runq, and it is again runq_tickle(), called by
  *    csched2_unit_wake() that actually decides what pcpu to use within the
@@ -677,7 +677,7 @@ void smt_idle_mask_clear(unsigned int cpu, cpumask_t *mask)
 }
 
 /*
- * In csched2_cpu_pick(), it may not be possible to actually look at remote
+ * In csched2_res_pick(), it may not be possible to actually look at remote
  * runqueues (the trylock-s on their spinlocks can fail!). If that happens,
  * we pick, in order of decreasing preference:
  *  1) svc's current pcpu, if it is part of svc's soft affinity;
@@ -2202,8 +2202,8 @@ csched2_context_saved(const struct scheduler *ops, struct sched_unit *unit)
 }
 
 #define MAX_LOAD (STIME_MAX)
-static int
-csched2_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit)
+static struct sched_resource *
+csched2_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 {
     struct csched2_private *prv = csched2_priv(ops);
     struct vcpu *vc = unit->vcpu_list;
@@ -2215,7 +2215,7 @@ csched2_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit)
 
     ASSERT(!cpumask_empty(&prv->active_queues));
 
-    SCHED_STAT_CRANK(pick_cpu);
+    SCHED_STAT_CRANK(pick_resource);
 
     /* Locking:
      * - Runqueue lock of vc->processor is already locked
@@ -2424,7 +2424,7 @@ csched2_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit)
                     (unsigned char *)&d);
     }
 
-    return new_cpu;
+    return get_sched_res(new_cpu);
 }
 
 /* Working state of the load-balancing algorithm */
@@ -3121,11 +3121,11 @@ csched2_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
     ASSERT(!is_idle_vcpu(vc));
     ASSERT(list_empty(&svc->runq_elem));
 
-    /* csched2_cpu_pick() expects the pcpu lock to be held */
+    /* csched2_res_pick() expects the pcpu lock to be held */
     lock = vcpu_schedule_lock_irq(vc);
 
-    vc->processor = csched2_cpu_pick(ops, unit);
-    unit->res = get_sched_res(vc->processor);
+    unit->res = csched2_res_pick(ops, unit);
+    vc->processor = unit->res->master_cpu;
 
     spin_unlock_irq(lock);
 
@@ -4112,7 +4112,7 @@ static const struct scheduler sched_credit2_def = {
     .adjust_affinity= csched2_aff_cntl,
     .adjust_global  = csched2_sys_cntl,
 
-    .pick_cpu       = csched2_cpu_pick,
+    .pick_resource  = csched2_res_pick,
     .migrate        = csched2_unit_migrate,
     .do_schedule    = csched2_schedule,
     .context_saved  = csched2_context_saved,
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index cb5e1b52db..cb400f55d0 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -261,9 +261,11 @@ static void null_free_domdata(const struct scheduler *ops, void *data)
  *
  * So this is not part of any hot path.
  */
-static unsigned int pick_cpu(struct null_private *prv, struct vcpu *v)
+static struct sched_resource *
+pick_res(struct null_private *prv, const struct sched_unit *unit)
 {
     unsigned int bs;
+    struct vcpu *v = unit->vcpu_list;
     unsigned int cpu = v->processor, new_cpu;
     cpumask_t *cpus = cpupool_domain_cpumask(v->domain);
 
@@ -327,7 +329,7 @@ static unsigned int pick_cpu(struct null_private *prv, struct vcpu *v)
         __trace_var(TRC_SNULL_PICKED_CPU, 1, sizeof(d), &d);
     }
 
-    return new_cpu;
+    return get_sched_res(new_cpu);
 }
 
 static void vcpu_assign(struct null_private *prv, struct vcpu *v,
@@ -457,8 +459,8 @@ static void null_unit_insert(const struct scheduler *ops,
     }
 
  retry:
-    cpu = v->processor = pick_cpu(prv, v);
-    unit->res = get_sched_res(cpu);
+    unit->res = pick_res(prv, unit);
+    cpu = v->processor = unit->res->master_cpu;
 
     spin_unlock(lock);
 
@@ -599,7 +601,7 @@ static void null_unit_wake(const struct scheduler *ops,
          */
         while ( cpumask_intersects(&prv->cpus_free, cpumask_scratch_cpu(cpu)) )
         {
-            unsigned int new_cpu = pick_cpu(prv, v);
+            unsigned int new_cpu = pick_res(prv, unit)->master_cpu;
 
             if ( test_and_clear_bit(new_cpu, &prv->cpus_free) )
             {
@@ -648,12 +650,11 @@ static void null_unit_sleep(const struct scheduler *ops,
     SCHED_STAT_CRANK(vcpu_sleep);
 }
 
-static int null_cpu_pick(const struct scheduler *ops,
-                         const struct sched_unit *unit)
+static struct sched_resource *
+null_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 {
-    struct vcpu *v = unit->vcpu_list;
-    ASSERT(!is_idle_vcpu(v));
-    return pick_cpu(null_priv(ops), v);
+    ASSERT(!is_idle_vcpu(unit->vcpu_list));
+    return pick_res(null_priv(ops), unit);
 }
 
 static void null_unit_migrate(const struct scheduler *ops,
@@ -985,7 +986,7 @@ static const struct scheduler sched_null_def = {
 
     .wake           = null_unit_wake,
     .sleep          = null_unit_sleep,
-    .pick_cpu       = null_cpu_pick,
+    .pick_resource  = null_res_pick,
     .migrate        = null_unit_migrate,
     .do_schedule    = null_schedule,
 
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 01e95f3276..6ca792e643 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -631,12 +631,12 @@ replq_reinsert(const struct scheduler *ops, struct rt_vcpu *svc)
 }
 
 /*
- * Pick a valid CPU for the vcpu vc
- * Valid CPU of a vcpu is intesection of vcpu's affinity
- * and available cpus
+ * Pick a valid resource for the vcpu vc
+ * Valid resource of a vcpu is intersection of vcpu's affinity
+ * and available resources
  */
-static int
-rt_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit)
+static struct sched_resource *
+rt_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
     cpumask_t cpus;
@@ -651,7 +651,7 @@ rt_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit)
             : cpumask_cycle(vc->processor, &cpus);
     ASSERT( !cpumask_empty(&cpus) && cpumask_test_cpu(cpu, &cpus) );
 
-    return cpu;
+    return get_sched_res(cpu);
 }
 
 /*
@@ -892,8 +892,8 @@ rt_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
     BUG_ON( is_idle_vcpu(vc) );
 
     /* This is safe because vc isn't yet being scheduled */
-    vc->processor = rt_cpu_pick(ops, unit);
-    unit->res = get_sched_res(vc->processor);
+    unit->res = rt_res_pick(ops, unit);
+    vc->processor = unit->res->master_cpu;
 
     lock = vcpu_schedule_lock_irq(vc);
 
@@ -1562,7 +1562,7 @@ static const struct scheduler sched_rtds_def = {
 
     .adjust         = rt_dom_cntl,
 
-    .pick_cpu       = rt_cpu_pick,
+    .pick_resource  = rt_res_pick,
     .do_schedule    = rt_schedule,
     .sleep          = rt_unit_sleep,
     .wake           = rt_unit_wake,
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 774f127d88..8bca32f5c4 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -87,10 +87,10 @@ sched_idle_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     return &sched_free_cpu_lock;
 }
 
-static int
-sched_idle_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit)
+static struct sched_resource *
+sched_idle_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 {
-    return unit->res->master_cpu;
+    return unit->res;
 }
 
 static void *
@@ -122,7 +122,7 @@ static struct scheduler sched_idle_ops = {
     .opt_name       = "idle",
     .sched_data     = NULL,
 
-    .pick_cpu       = sched_idle_cpu_pick,
+    .pick_resource  = sched_idle_res_pick,
     .do_schedule    = sched_idle_schedule,
 
     .alloc_udata    = sched_idle_alloc_udata,
@@ -747,7 +747,8 @@ static void vcpu_migrate_finish(struct vcpu *v)
                 break;
 
             /* Select a new CPU. */
-            new_cpu = sched_pick_cpu(vcpu_scheduler(v), v->sched_unit);
+            new_cpu = sched_pick_resource(vcpu_scheduler(v),
+                                          v->sched_unit)->master_cpu;
             if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
@@ -840,8 +841,9 @@ void restore_vcpu_affinity(struct domain *d)
 
         /* v->processor might have changed, so reacquire the lock. */
         lock = vcpu_schedule_lock_irq(v);
-        v->processor = sched_pick_cpu(vcpu_scheduler(v), v->sched_unit);
-        v->sched_unit->res = get_sched_res(v->processor);
+        v->sched_unit->res = sched_pick_resource(vcpu_scheduler(v),
+                                                 v->sched_unit);
+        v->processor = v->sched_unit->res->master_cpu;
         spin_unlock_irq(lock);
 
         if ( old_cpu != v->processor )
@@ -1854,7 +1856,7 @@ void __init scheduler_init(void)
 
         sched_test_func(init);
         sched_test_func(deinit);
-        sched_test_func(pick_cpu);
+        sched_test_func(pick_resource);
         sched_test_func(alloc_udata);
         sched_test_func(free_udata);
         sched_test_func(switch_sched);
diff --git a/xen/include/xen/perfc_defn.h b/xen/include/xen/perfc_defn.h
index ef6f86b91e..1ad4384080 100644
--- a/xen/include/xen/perfc_defn.h
+++ b/xen/include/xen/perfc_defn.h
@@ -69,7 +69,7 @@ PERFCOUNTER(migrate_on_runq,        "csched2: migrate_on_runq")
 PERFCOUNTER(migrate_no_runq,        "csched2: migrate_no_runq")
 PERFCOUNTER(runtime_min_timer,      "csched2: runtime_min_timer")
 PERFCOUNTER(runtime_max_timer,      "csched2: runtime_max_timer")
-PERFCOUNTER(pick_cpu,               "csched2: pick_cpu")
+PERFCOUNTER(pick_resource,          "csched2: pick_resource")
 PERFCOUNTER(need_fallback_cpu,      "csched2: need_fallback_cpu")
 PERFCOUNTER(migrated,               "csched2: migrated")
 PERFCOUNTER(migrate_resisted,       "csched2: migrate_resisted")
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 5c9ac07587..4f61f65288 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -189,8 +189,8 @@ struct scheduler {
     struct task_slice (*do_schedule) (const struct scheduler *, s_time_t,
                                       bool_t tasklet_work_scheduled);
 
-    int          (*pick_cpu)       (const struct scheduler *,
-                                    const struct sched_unit *);
+    struct sched_resource *(*pick_resource)(const struct scheduler *,
+                                            const struct sched_unit *);
     void         (*migrate)        (const struct scheduler *,
                                     struct sched_unit *, unsigned int);
     int          (*adjust)         (const struct scheduler *, struct domain *,
@@ -355,10 +355,10 @@ static inline void sched_migrate(const struct scheduler *s,
     }
 }
 
-static inline int sched_pick_cpu(const struct scheduler *s,
-                                 const struct sched_unit *unit)
+static inline struct sched_resource *sched_pick_resource(
+    const struct scheduler *s, const struct sched_unit *unit)
 {
-    return s->pick_cpu(s, unit);
+    return s->pick_resource(s, unit);
 }
 
 static inline void sched_adjust_affinity(const struct scheduler *s,
-- 
2.16.4


