From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
	George Dunlap <george.dunlap@eu.citrix.com>,
	Dario Faggioli <dfaggioli@suse.com>
Subject: [Xen-devel] [PATCH v2 14/48] xen/sched: make null scheduler vcpu agnostic.
Date: Fri,  9 Aug 2019 16:57:59 +0200	[thread overview]
Message-ID: <20190809145833.1020-15-jgross@suse.com> (raw)
In-Reply-To: <20190809145833.1020-1-jgross@suse.com>

Switch null scheduler completely from vcpu to sched_unit usage.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/sched_null.c | 333 ++++++++++++++++++++++++------------------------
 1 file changed, 165 insertions(+), 168 deletions(-)
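
As a side note on the conversion pattern used here (and in the per-scheduler
patches that follow): wherever the code used to read or write v->processor
directly, it now goes through the unit and its scheduling resource. The
snippet below is a standalone, stripped-down mock of that idiom;
sched_resource, sched_unit, sched_set_res() and sched_unit_cpu() are
simplified stand-ins for the real types and helpers (the real sched_set_res()
also keeps the vcpus' processor fields in sync), so treat it as an
illustration of the shape of the change rather than a copy of the actual
headers:

#include <stdio.h>

/* Stand-in types; the real ones live in the Xen scheduler headers. */
struct sched_resource { unsigned int cpu; };
struct sched_unit     { struct sched_resource *res; unsigned int unit_id; };

/* The pCPU of a unit is derived from its resource, not from v->processor. */
static unsigned int sched_unit_cpu(const struct sched_unit *unit)
{
    return unit->res->cpu;
}

/* Replaces direct "v->processor = cpu; unit->res = ..." assignments. */
static void sched_set_res(struct sched_unit *unit, struct sched_resource *res)
{
    unit->res = res;
}

int main(void)
{
    struct sched_resource res3 = { .cpu = 3 };
    struct sched_unit unit = { .res = NULL, .unit_id = 0 };

    sched_set_res(&unit, &res3);
    printf("unit %u placed on pCPU %u\n", unit.unit_id, sched_unit_cpu(&unit));
    return 0;
}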

diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index 84507a41c8..a630951110 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -18,10 +18,10 @@
 
 /*
  * The 'null' scheduler always choose to run, on each pCPU, either nothing
- * (i.e., the pCPU stays idle) or always the same vCPU.
+ * (i.e., the pCPU stays idle) or always the same unit.
  *
  * It is aimed at supporting static scenarios, where there always are
- * less vCPUs than pCPUs (and the vCPUs don't need to move among pCPUs
+ * less units than pCPUs (and the units don't need to move among pCPUs
  * for any reason) with the least possible overhead.
  *
  * Typical usecase are embedded applications, but also HPC, especially
@@ -37,8 +37,8 @@
  * null tracing events. Check include/public/trace.h for more details.
  */
 #define TRC_SNULL_PICKED_CPU    TRC_SCHED_CLASS_EVT(SNULL, 1)
-#define TRC_SNULL_VCPU_ASSIGN   TRC_SCHED_CLASS_EVT(SNULL, 2)
-#define TRC_SNULL_VCPU_DEASSIGN TRC_SCHED_CLASS_EVT(SNULL, 3)
+#define TRC_SNULL_UNIT_ASSIGN   TRC_SCHED_CLASS_EVT(SNULL, 2)
+#define TRC_SNULL_UNIT_DEASSIGN TRC_SCHED_CLASS_EVT(SNULL, 3)
 #define TRC_SNULL_MIGRATE       TRC_SCHED_CLASS_EVT(SNULL, 4)
 #define TRC_SNULL_SCHEDULE      TRC_SCHED_CLASS_EVT(SNULL, 5)
 #define TRC_SNULL_TASKLET       TRC_SCHED_CLASS_EVT(SNULL, 6)
@@ -47,13 +47,13 @@
  * Locking:
  * - Scheduler-lock (a.k.a. runqueue lock):
  *  + is per-pCPU;
- *  + serializes assignment and deassignment of vCPUs to a pCPU.
+ *  + serializes assignment and deassignment of units to a pCPU.
  * - Private data lock (a.k.a. private scheduler lock):
  *  + is scheduler-wide;
  *  + serializes accesses to the list of domains in this scheduler.
  * - Waitqueue lock:
  *  + is scheduler-wide;
- *  + serialize accesses to the list of vCPUs waiting to be assigned
+ *  + serialize accesses to the list of units waiting to be assigned
  *    to pCPUs.
  *
  * Ordering is: private lock, runqueue lock, waitqueue lock. Or, OTOH,
@@ -77,25 +77,25 @@
 struct null_private {
     spinlock_t lock;        /* scheduler lock; nests inside cpupool_lock */
     struct list_head ndom;  /* Domains of this scheduler                 */
-    struct list_head waitq; /* vCPUs not assigned to any pCPU            */
+    struct list_head waitq; /* units not assigned to any pCPU            */
     spinlock_t waitq_lock;  /* serializes waitq; nests inside runq locks */
-    cpumask_t cpus_free;    /* CPUs without a vCPU associated to them    */
+    cpumask_t cpus_free;    /* CPUs without a unit associated to them    */
 };
 
 /*
  * Physical CPU
  */
 struct null_pcpu {
-    struct vcpu *vcpu;
+    struct sched_unit *unit;
 };
 DEFINE_PER_CPU(struct null_pcpu, npc);
 
 /*
- * Virtual CPU
+ * Scheduling unit
  */
 struct null_unit {
     struct list_head waitq_elem;
-    struct vcpu *vcpu;
+    struct sched_unit *unit;
 };
 
 /*
@@ -119,13 +119,13 @@ static inline struct null_unit *null_unit(const struct sched_unit *unit)
     return unit->priv;
 }
 
-static inline bool vcpu_check_affinity(struct vcpu *v, unsigned int cpu,
+static inline bool unit_check_affinity(struct sched_unit *unit,
+                                       unsigned int cpu,
                                        unsigned int balance_step)
 {
-    affinity_balance_cpumask(v->sched_unit, balance_step,
-                             cpumask_scratch_cpu(cpu));
+    affinity_balance_cpumask(unit, balance_step, cpumask_scratch_cpu(cpu));
     cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
-                cpupool_domain_cpumask(v->domain));
+                cpupool_domain_cpumask(unit->domain));
 
     return cpumask_test_cpu(cpu, cpumask_scratch_cpu(cpu));
 }
@@ -160,9 +160,9 @@ static void null_deinit(struct scheduler *ops)
 
 static void init_pdata(struct null_private *prv, unsigned int cpu)
 {
-    /* Mark the pCPU as free, and with no vCPU assigned */
+    /* Mark the pCPU as free, and with no unit assigned */
     cpumask_set_cpu(cpu, &prv->cpus_free);
-    per_cpu(npc, cpu).vcpu = NULL;
+    per_cpu(npc, cpu).unit = NULL;
 }
 
 static void null_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
@@ -183,13 +183,12 @@ static void null_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
     ASSERT(!pcpu);
 
     cpumask_clear_cpu(cpu, &prv->cpus_free);
-    per_cpu(npc, cpu).vcpu = NULL;
+    per_cpu(npc, cpu).unit = NULL;
 }
 
 static void *null_alloc_vdata(const struct scheduler *ops,
                               struct sched_unit *unit, void *dd)
 {
-    struct vcpu *v = unit->vcpu_list;
     struct null_unit *nvc;
 
     nvc = xzalloc(struct null_unit);
@@ -197,7 +196,7 @@ static void *null_alloc_vdata(const struct scheduler *ops,
         return NULL;
 
     INIT_LIST_HEAD(&nvc->waitq_elem);
-    nvc->vcpu = v;
+    nvc->unit = unit;
 
     SCHED_STAT_CRANK(unit_alloc);
 
@@ -249,15 +248,15 @@ static void null_free_domdata(const struct scheduler *ops, void *data)
 }
 
 /*
- * vCPU to pCPU assignment and placement. This _only_ happens:
+ * unit to pCPU assignment and placement. This _only_ happens:
  *  - on insert,
  *  - on migrate.
  *
- * Insert occurs when a vCPU joins this scheduler for the first time
+ * Insert occurs when a unit joins this scheduler for the first time
  * (e.g., when the domain it's part of is moved to the scheduler's
  * cpupool).
  *
- * Migration may be necessary if a pCPU (with a vCPU assigned to it)
+ * Migration may be necessary if a pCPU (with a unit assigned to it)
  * is removed from the scheduler's cpupool.
  *
  * So this is not part of any hot path.
@@ -266,9 +265,8 @@ static struct sched_resource *
 pick_res(struct null_private *prv, struct sched_unit *unit)
 {
     unsigned int bs;
-    struct vcpu *v = unit->vcpu_list;
-    unsigned int cpu = v->processor, new_cpu;
-    cpumask_t *cpus = cpupool_domain_cpumask(v->domain);
+    unsigned int cpu = sched_unit_cpu(unit), new_cpu;
+    cpumask_t *cpus = cpupool_domain_cpumask(unit->domain);
 
     ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
@@ -283,11 +281,12 @@ pick_res(struct null_private *prv, struct sched_unit *unit)
         /*
          * If our processor is free, or we are assigned to it, and it is also
          * still valid and part of our affinity, just go for it.
-         * (Note that we may call vcpu_check_affinity(), but we deliberately
+         * (Note that we may call unit_check_affinity(), but we deliberately
          * don't, so we get to keep in the scratch cpumask what we have just
          * put in it.)
          */
-        if ( likely((per_cpu(npc, cpu).vcpu == NULL || per_cpu(npc, cpu).vcpu == v)
+        if ( likely((per_cpu(npc, cpu).unit == NULL ||
+                     per_cpu(npc, cpu).unit == unit)
                     && cpumask_test_cpu(cpu, cpumask_scratch_cpu(cpu))) )
         {
             new_cpu = cpu;
@@ -305,13 +304,13 @@ pick_res(struct null_private *prv, struct sched_unit *unit)
 
     /*
      * If we didn't find any free pCPU, just pick any valid pcpu, even if
-     * it has another vCPU assigned. This will happen during shutdown and
+     * it has another unit assigned. This will happen during shutdown and
      * suspend/resume, but it may also happen during "normal operation", if
      * all the pCPUs are busy.
      *
      * In fact, there must always be something sane in v->processor, or
      * unit_schedule_lock() and friends won't work. This is not a problem,
-     * as we will actually assign the vCPU to the pCPU we return from here,
+     * as we will actually assign the unit to the pCPU we return from here,
      * only if the pCPU is free.
      */
     cpumask_and(cpumask_scratch_cpu(cpu), cpus, unit->cpu_hard_affinity);
@@ -321,11 +320,11 @@ pick_res(struct null_private *prv, struct sched_unit *unit)
     if ( unlikely(tb_init_done) )
     {
         struct {
-            uint16_t vcpu, dom;
+            uint16_t unit, dom;
             uint32_t new_cpu;
         } d;
-        d.dom = v->domain->domain_id;
-        d.vcpu = v->vcpu_id;
+        d.dom = unit->domain->domain_id;
+        d.unit = unit->unit_id;
         d.new_cpu = new_cpu;
         __trace_var(TRC_SNULL_PICKED_CPU, 1, sizeof(d), &d);
     }
@@ -333,64 +332,64 @@ pick_res(struct null_private *prv, struct sched_unit *unit)
     return get_sched_res(new_cpu);
 }
 
-static void vcpu_assign(struct null_private *prv, struct vcpu *v,
+static void unit_assign(struct null_private *prv, struct sched_unit *unit,
                         unsigned int cpu)
 {
-    ASSERT(is_vcpu_online(v));
+    ASSERT(is_unit_online(unit));
 
-    per_cpu(npc, cpu).vcpu = v;
-    v->processor = cpu;
-    v->sched_unit->res = get_sched_res(cpu);
+    per_cpu(npc, cpu).unit = unit;
+    sched_set_res(unit, get_sched_res(cpu));
     cpumask_clear_cpu(cpu, &prv->cpus_free);
 
-    dprintk(XENLOG_G_INFO, "%d <-- %pv\n", cpu, v);
+    dprintk(XENLOG_G_INFO, "%d <-- %pdv%d\n", cpu, unit->domain, unit->unit_id);
 
     if ( unlikely(tb_init_done) )
     {
         struct {
-            uint16_t vcpu, dom;
+            uint16_t unit, dom;
             uint32_t cpu;
         } d;
-        d.dom = v->domain->domain_id;
-        d.vcpu = v->vcpu_id;
+        d.dom = unit->domain->domain_id;
+        d.unit = unit->unit_id;
         d.cpu = cpu;
-        __trace_var(TRC_SNULL_VCPU_ASSIGN, 1, sizeof(d), &d);
+        __trace_var(TRC_SNULL_UNIT_ASSIGN, 1, sizeof(d), &d);
     }
 }
 
 /* Returns true if a cpu was tickled */
-static bool vcpu_deassign(struct null_private *prv, struct vcpu *v)
+static bool unit_deassign(struct null_private *prv, struct sched_unit *unit)
 {
     unsigned int bs;
-    unsigned int cpu = v->processor;
+    unsigned int cpu = sched_unit_cpu(unit);
     struct null_unit *wvc;
 
-    ASSERT(list_empty(&null_unit(v->sched_unit)->waitq_elem));
-    ASSERT(per_cpu(npc, v->processor).vcpu == v);
-    ASSERT(!cpumask_test_cpu(v->processor, &prv->cpus_free));
+    ASSERT(list_empty(&null_unit(unit)->waitq_elem));
+    ASSERT(per_cpu(npc, cpu).unit == unit);
+    ASSERT(!cpumask_test_cpu(cpu, &prv->cpus_free));
 
-    per_cpu(npc, cpu).vcpu = NULL;
+    per_cpu(npc, cpu).unit = NULL;
     cpumask_set_cpu(cpu, &prv->cpus_free);
 
-    dprintk(XENLOG_G_INFO, "%d <-- NULL (%pv)\n", cpu, v);
+    dprintk(XENLOG_G_INFO, "%d <-- NULL (%pdv%d)\n", cpu, unit->domain,
+            unit->unit_id);
 
     if ( unlikely(tb_init_done) )
     {
         struct {
-            uint16_t vcpu, dom;
+            uint16_t unit, dom;
             uint32_t cpu;
         } d;
-        d.dom = v->domain->domain_id;
-        d.vcpu = v->vcpu_id;
+        d.dom = unit->domain->domain_id;
+        d.unit = unit->unit_id;
         d.cpu = cpu;
-        __trace_var(TRC_SNULL_VCPU_DEASSIGN, 1, sizeof(d), &d);
+        __trace_var(TRC_SNULL_UNIT_DEASSIGN, 1, sizeof(d), &d);
     }
 
     spin_lock(&prv->waitq_lock);
 
     /*
-     * If v is assigned to a pCPU, let's see if there is someone waiting,
-     * suitable to be assigned to it (prioritizing vcpus that have
+     * If unit is assigned to a pCPU, let's see if there is someone waiting,
+     * suitable to be assigned to it (prioritizing units that have
      * soft-affinity with cpu).
      */
     for_each_affinity_balance_step( bs )
@@ -398,13 +397,13 @@ static bool vcpu_deassign(struct null_private *prv, struct vcpu *v)
         list_for_each_entry( wvc, &prv->waitq, waitq_elem )
         {
             if ( bs == BALANCE_SOFT_AFFINITY &&
-                 !has_soft_affinity(wvc->vcpu->sched_unit) )
+                 !has_soft_affinity(wvc->unit) )
                 continue;
 
-            if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
+            if ( unit_check_affinity(wvc->unit, cpu, bs) )
             {
                 list_del_init(&wvc->waitq_elem);
-                vcpu_assign(prv, wvc->vcpu, cpu);
+                unit_assign(prv, wvc->unit, cpu);
                 cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
                 spin_unlock(&prv->waitq_lock);
                 return true;
@@ -425,9 +424,9 @@ static spinlock_t *null_switch_sched(struct scheduler *new_ops,
     struct null_private *prv = null_priv(new_ops);
     struct null_unit *nvc = vdata;
 
-    ASSERT(nvc && is_idle_vcpu(nvc->vcpu));
+    ASSERT(nvc && is_idle_unit(nvc->unit));
 
-    idle_vcpu[cpu]->sched_unit->priv = vdata;
+    sched_idle_unit(cpu)->priv = vdata;
 
     /*
      * We are holding the runqueue lock already (it's been taken in
@@ -444,41 +443,40 @@ static spinlock_t *null_switch_sched(struct scheduler *new_ops,
 static void null_unit_insert(const struct scheduler *ops,
                              struct sched_unit *unit)
 {
-    struct vcpu *v = unit->vcpu_list;
     struct null_private *prv = null_priv(ops);
     struct null_unit *nvc = null_unit(unit);
     unsigned int cpu;
     spinlock_t *lock;
 
-    ASSERT(!is_idle_vcpu(v));
+    ASSERT(!is_idle_unit(unit));
 
     lock = unit_schedule_lock_irq(unit);
 
-    if ( unlikely(!is_vcpu_online(v)) )
+    if ( unlikely(!is_unit_online(unit)) )
     {
         unit_schedule_unlock_irq(lock, unit);
         return;
     }
 
  retry:
-    unit->res = pick_res(prv, unit);
-    cpu = v->processor = unit->res->processor;
+    sched_set_res(unit, pick_res(prv, unit));
+    cpu = sched_unit_cpu(unit);
 
     spin_unlock(lock);
 
     lock = unit_schedule_lock(unit);
 
     cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
-                cpupool_domain_cpumask(v->domain));
+                cpupool_domain_cpumask(unit->domain));
 
-    /* If the pCPU is free, we assign v to it */
-    if ( likely(per_cpu(npc, cpu).vcpu == NULL) )
+    /* If the pCPU is free, we assign unit to it */
+    if ( likely(per_cpu(npc, cpu).unit == NULL) )
     {
         /*
          * Insert is followed by vcpu_wake(), so there's no need to poke
          * the pcpu with the SCHEDULE_SOFTIRQ, as wake will do that.
          */
-        vcpu_assign(prv, v, cpu);
+        unit_assign(prv, unit, cpu);
     }
     else if ( cpumask_intersects(&prv->cpus_free, cpumask_scratch_cpu(cpu)) )
     {
@@ -497,7 +495,8 @@ static void null_unit_insert(const struct scheduler *ops,
          */
         spin_lock(&prv->waitq_lock);
         list_add_tail(&nvc->waitq_elem, &prv->waitq);
-        dprintk(XENLOG_G_WARNING, "WARNING: %pv not assigned to any CPU!\n", v);
+        dprintk(XENLOG_G_WARNING, "WARNING: %pdv%d not assigned to any CPU!\n",
+                unit->domain, unit->unit_id);
         spin_unlock(&prv->waitq_lock);
     }
     spin_unlock_irq(lock);
@@ -508,24 +507,23 @@ static void null_unit_insert(const struct scheduler *ops,
 static void null_unit_remove(const struct scheduler *ops,
                              struct sched_unit *unit)
 {
-    struct vcpu *v = unit->vcpu_list;
     struct null_private *prv = null_priv(ops);
     struct null_unit *nvc = null_unit(unit);
     spinlock_t *lock;
 
-    ASSERT(!is_idle_vcpu(v));
+    ASSERT(!is_idle_unit(unit));
 
     lock = unit_schedule_lock_irq(unit);
 
-    /* If offline, the vcpu shouldn't be assigned, nor in the waitqueue */
-    if ( unlikely(!is_vcpu_online(v)) )
+    /* If offline, the unit shouldn't be assigned, nor in the waitqueue */
+    if ( unlikely(!is_unit_online(unit)) )
     {
-        ASSERT(per_cpu(npc, v->processor).vcpu != v);
+        ASSERT(per_cpu(npc, sched_unit_cpu(unit)).unit != unit);
         ASSERT(list_empty(&nvc->waitq_elem));
         goto out;
     }
 
-    /* If v is in waitqueue, just get it out of there and bail */
+    /* If unit is in waitqueue, just get it out of there and bail */
     if ( unlikely(!list_empty(&nvc->waitq_elem)) )
     {
         spin_lock(&prv->waitq_lock);
@@ -535,7 +533,7 @@ static void null_unit_remove(const struct scheduler *ops,
         goto out;
     }
 
-    vcpu_deassign(prv, v);
+    unit_deassign(prv, unit);
 
  out:
     unit_schedule_unlock_irq(lock, unit);
@@ -546,14 +544,13 @@ static void null_unit_remove(const struct scheduler *ops,
 static void null_unit_wake(const struct scheduler *ops,
                            struct sched_unit *unit)
 {
-    struct vcpu *v = unit->vcpu_list;
     struct null_private *prv = null_priv(ops);
     struct null_unit *nvc = null_unit(unit);
-    unsigned int cpu = v->processor;
+    unsigned int cpu = sched_unit_cpu(unit);
 
-    ASSERT(!is_idle_vcpu(v));
+    ASSERT(!is_idle_unit(unit));
 
-    if ( unlikely(curr_on_cpu(cpu) == unit) )
+    if ( unlikely(curr_on_cpu(sched_unit_cpu(unit)) == unit) )
     {
         SCHED_STAT_CRANK(unit_wake_running);
         return;
@@ -566,33 +563,33 @@ static void null_unit_wake(const struct scheduler *ops,
         return;
     }
 
-    if ( likely(vcpu_runnable(v)) )
+    if ( likely(unit_runnable(unit)) )
         SCHED_STAT_CRANK(unit_wake_runnable);
     else
         SCHED_STAT_CRANK(unit_wake_not_runnable);
 
     /*
-     * If a vcpu is neither on a pCPU nor in the waitqueue, it means it was
+     * If a unit is neither on a pCPU nor in the waitqueue, it means it was
      * offline, and that it is now coming back being online.
      */
-    if ( unlikely(per_cpu(npc, cpu).vcpu != v && list_empty(&nvc->waitq_elem)) )
+    if ( unlikely(per_cpu(npc, cpu).unit != unit && list_empty(&nvc->waitq_elem)) )
     {
         spin_lock(&prv->waitq_lock);
         list_add_tail(&nvc->waitq_elem, &prv->waitq);
         spin_unlock(&prv->waitq_lock);
 
         cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
-                    cpupool_domain_cpumask(v->domain));
+                    cpupool_domain_cpumask(unit->domain));
 
         if ( !cpumask_intersects(&prv->cpus_free, cpumask_scratch_cpu(cpu)) )
         {
             dprintk(XENLOG_G_WARNING, "WARNING: d%dv%d not assigned to any CPU!\n",
-                    v->domain->domain_id, v->vcpu_id);
+                    unit->domain->domain_id, unit->unit_id);
             return;
         }
 
         /*
-         * Now we would want to assign the vcpu to cpu, but we can't, because
+         * Now we would want to assign the unit to cpu, but we can't, because
          * we don't have the lock. So, let's do the following:
          * - try to remove cpu from the list of free cpus, to avoid races with
          *   other onlining, inserting or migrating operations;
@@ -613,25 +610,24 @@ static void null_unit_wake(const struct scheduler *ops,
         }
     }
 
-    /* Note that we get here only for vCPUs assigned to a pCPU */
-    cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
+    /* Note that we get here only for units assigned to a pCPU */
+    cpu_raise_softirq(sched_unit_cpu(unit), SCHEDULE_SOFTIRQ);
 }
 
 static void null_unit_sleep(const struct scheduler *ops,
                             struct sched_unit *unit)
 {
-    struct vcpu *v = unit->vcpu_list;
     struct null_private *prv = null_priv(ops);
-    unsigned int cpu = v->processor;
+    unsigned int cpu = sched_unit_cpu(unit);
     bool tickled = false;
 
-    ASSERT(!is_idle_vcpu(v));
+    ASSERT(!is_idle_unit(unit));
 
-    /* 
-     * Check if the vcpu is in the process of being offlined. if yes,
+    /*
+     * Check if the unit is in the process of being offlined. If yes,
      * we need to remove it from either its pCPU or the waitqueue.
      */
-    if ( unlikely(!is_vcpu_online(v)) )
+    if ( unlikely(!is_unit_online(unit)) )
     {
         struct null_unit *nvc = null_unit(unit);
 
@@ -641,11 +637,11 @@ static void null_unit_sleep(const struct scheduler *ops,
             list_del_init(&nvc->waitq_elem);
             spin_unlock(&prv->waitq_lock);
         }
-        else if ( per_cpu(npc, cpu).vcpu == v )
-            tickled = vcpu_deassign(prv, v);
+        else if ( per_cpu(npc, cpu).unit == unit )
+            tickled = unit_deassign(prv, unit);
     }
 
-    /* If v is not assigned to a pCPU, or is not running, no need to bother */
+    /* If unit is not assigned to a pCPU, or is not running, no need to bother */
     if ( likely(!tickled && curr_on_cpu(cpu) == unit) )
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
 
@@ -655,42 +651,41 @@ static void null_unit_sleep(const struct scheduler *ops,
 static struct sched_resource *
 null_res_pick(const struct scheduler *ops, struct sched_unit *unit)
 {
-    ASSERT(!is_idle_vcpu(unit->vcpu_list));
+    ASSERT(!is_idle_unit(unit));
     return pick_res(null_priv(ops), unit);
 }
 
 static void null_unit_migrate(const struct scheduler *ops,
                               struct sched_unit *unit, unsigned int new_cpu)
 {
-    struct vcpu *v = unit->vcpu_list;
     struct null_private *prv = null_priv(ops);
     struct null_unit *nvc = null_unit(unit);
 
-    ASSERT(!is_idle_vcpu(v));
+    ASSERT(!is_idle_unit(unit));
 
-    if ( v->processor == new_cpu )
+    if ( sched_unit_cpu(unit) == new_cpu )
         return;
 
     if ( unlikely(tb_init_done) )
     {
         struct {
-            uint16_t vcpu, dom;
+            uint16_t unit, dom;
             uint16_t cpu, new_cpu;
         } d;
-        d.dom = v->domain->domain_id;
-        d.vcpu = v->vcpu_id;
-        d.cpu = v->processor;
+        d.dom = unit->domain->domain_id;
+        d.unit = unit->unit_id;
+        d.cpu = sched_unit_cpu(unit);
         d.new_cpu = new_cpu;
         __trace_var(TRC_SNULL_MIGRATE, 1, sizeof(d), &d);
     }
 
     /*
-     * If v is assigned to a pCPU, then such pCPU becomes free, and we
+     * If unit is assigned to a pCPU, then such pCPU becomes free, and we
      * should look in the waitqueue if anyone else can be assigned to it.
      */
-    if ( likely(per_cpu(npc, v->processor).vcpu == v) )
+    if ( likely(per_cpu(npc, sched_unit_cpu(unit)).unit == unit) )
     {
-        vcpu_deassign(prv, v);
+        unit_deassign(prv, unit);
         SCHED_STAT_CRANK(migrate_running);
     }
     else if ( !list_empty(&nvc->waitq_elem) )
@@ -699,13 +694,13 @@ static void null_unit_migrate(const struct scheduler *ops,
     SCHED_STAT_CRANK(migrated);
 
     /*
-     * If a vcpu is (going) offline, we want it to be neither assigned
+     * If a unit is (going) offline, we want it to be neither assigned
      * to a pCPU, nor in the waitqueue.
      *
      * If it was on a cpu, we've removed it from there above. If it is
      * in the waitqueue, we remove it from there now. And then we bail.
      */
-    if ( unlikely(!is_vcpu_online(v)) )
+    if ( unlikely(!is_unit_online(unit)) )
     {
         spin_lock(&prv->waitq_lock);
         list_del_init(&nvc->waitq_elem);
@@ -714,32 +709,34 @@ static void null_unit_migrate(const struct scheduler *ops,
     }
 
     /*
-     * Let's now consider new_cpu, which is where v is being sent. It can be
-     * either free, or have a vCPU already assigned to it.
+     * Let's now consider new_cpu, which is where unit is being sent. It can be
+     * either free, or have a unit already assigned to it.
      *
-     * In the former case, we should assign v to it, and try to get it to run,
+     * In the former case we should assign unit to it, and try to get it to run,
      * if possible, according to affinity.
      *
-     * In latter, all we can do is to park v in the waitqueue.
+     * In latter, all we can do is to park unit in the waitqueue.
      */
-    if ( per_cpu(npc, new_cpu).vcpu == NULL &&
-         vcpu_check_affinity(v, new_cpu, BALANCE_HARD_AFFINITY) )
+    if ( per_cpu(npc, new_cpu).unit == NULL &&
+         unit_check_affinity(unit, new_cpu, BALANCE_HARD_AFFINITY) )
     {
-        /* v might have been in the waitqueue, so remove it */
+        /* unit might have been in the waitqueue, so remove it */
         spin_lock(&prv->waitq_lock);
         list_del_init(&nvc->waitq_elem);
         spin_unlock(&prv->waitq_lock);
 
-        vcpu_assign(prv, v, new_cpu);
+        unit_assign(prv, unit, new_cpu);
     }
     else
     {
-        /* Put v in the waitqueue, if it wasn't there already */
+        /* Put unit in the waitqueue, if it wasn't there already */
         spin_lock(&prv->waitq_lock);
         if ( list_empty(&nvc->waitq_elem) )
         {
             list_add_tail(&nvc->waitq_elem, &prv->waitq);
-            dprintk(XENLOG_G_WARNING, "WARNING: %pv not assigned to any CPU!\n", v);
+            dprintk(XENLOG_G_WARNING,
+                    "WARNING: %pdv%d not assigned to any CPU!\n", unit->domain,
+                    unit->unit_id);
         }
         spin_unlock(&prv->waitq_lock);
     }
@@ -753,35 +750,34 @@ static void null_unit_migrate(const struct scheduler *ops,
      * by this, will be fixed-up during resume.
      */
  out:
-    v->processor = new_cpu;
-    unit->res = get_sched_res(new_cpu);
+    sched_set_res(unit, get_sched_res(new_cpu));
 }
 
 #ifndef NDEBUG
-static inline void null_vcpu_check(struct vcpu *v)
+static inline void null_unit_check(struct sched_unit *unit)
 {
-    struct null_unit * const nvc = null_unit(v->sched_unit);
-    struct null_dom * const ndom = v->domain->sched_priv;
+    struct null_unit * const nvc = null_unit(unit);
+    struct null_dom * const ndom = unit->domain->sched_priv;
 
-    BUG_ON(nvc->vcpu != v);
+    BUG_ON(nvc->unit != unit);
 
     if ( ndom )
-        BUG_ON(is_idle_vcpu(v));
+        BUG_ON(is_idle_unit(unit));
     else
-        BUG_ON(!is_idle_vcpu(v));
+        BUG_ON(!is_idle_unit(unit));
 
     SCHED_STAT_CRANK(unit_check);
 }
-#define NULL_VCPU_CHECK(v)  (null_vcpu_check(v))
+#define NULL_UNIT_CHECK(unit)  (null_unit_check(unit))
 #else
-#define NULL_VCPU_CHECK(v)
+#define NULL_UNIT_CHECK(unit)
 #endif
 
 
 /*
  * The most simple scheduling function of all times! We either return:
- *  - the vCPU assigned to the pCPU, if there's one and it can run;
- *  - the idle vCPU, otherwise.
+ *  - the unit assigned to the pCPU, if there's one and it can run;
+ *  - the idle unit, otherwise.
  */
 static struct task_slice null_schedule(const struct scheduler *ops,
                                        s_time_t now,
@@ -794,24 +790,24 @@ static struct task_slice null_schedule(const struct scheduler *ops,
     struct task_slice ret;
 
     SCHED_STAT_CRANK(schedule);
-    NULL_VCPU_CHECK(current);
+    NULL_UNIT_CHECK(current->sched_unit);
 
     if ( unlikely(tb_init_done) )
     {
         struct {
             uint16_t tasklet, cpu;
-            int16_t vcpu, dom;
+            int16_t unit, dom;
         } d;
         d.cpu = cpu;
         d.tasklet = tasklet_work_scheduled;
-        if ( per_cpu(npc, cpu).vcpu == NULL )
+        if ( per_cpu(npc, cpu).unit == NULL )
         {
-            d.vcpu = d.dom = -1;
+            d.unit = d.dom = -1;
         }
         else
         {
-            d.vcpu = per_cpu(npc, cpu).vcpu->vcpu_id;
-            d.dom = per_cpu(npc, cpu).vcpu->domain->domain_id;
+            d.unit = per_cpu(npc, cpu).unit->unit_id;
+            d.dom = per_cpu(npc, cpu).unit->domain->domain_id;
         }
         __trace_var(TRC_SNULL_SCHEDULE, 1, sizeof(d), &d);
     }
@@ -819,16 +815,16 @@ static struct task_slice null_schedule(const struct scheduler *ops,
     if ( tasklet_work_scheduled )
     {
         trace_var(TRC_SNULL_TASKLET, 1, 0, NULL);
-        ret.task = idle_vcpu[cpu]->sched_unit;
+        ret.task = sched_idle_unit(cpu);
     }
     else
-        ret.task = per_cpu(npc, cpu).vcpu->sched_unit;
+        ret.task = per_cpu(npc, cpu).unit;
     ret.migrated = 0;
     ret.time = -1;
 
     /*
      * We may be new in the cpupool, or just coming back online. In which
-     * case, there may be vCPUs in the waitqueue that we can assign to us
+     * case, there may be units in the waitqueue that we can assign to us
      * and run.
      */
     if ( unlikely(ret.task == NULL) )
@@ -839,10 +835,10 @@ static struct task_slice null_schedule(const struct scheduler *ops,
             goto unlock;
 
         /*
-         * We scan the waitqueue twice, for prioritizing vcpus that have
+         * We scan the waitqueue twice, for prioritizing units that have
          * soft-affinity with cpu. This may look like something expensive to
-         * do here in null_schedule(), but it's actually fine, beceuse we do
-         * it only in cases where a pcpu has no vcpu associated (e.g., as
+         * do here in null_schedule(), but it's actually fine, because we do
+         * it only in cases where a pcpu has no unit associated (e.g., as
          * said above, the cpu has just joined a cpupool).
          */
         for_each_affinity_balance_step( bs )
@@ -850,14 +846,14 @@ static struct task_slice null_schedule(const struct scheduler *ops,
             list_for_each_entry( wvc, &prv->waitq, waitq_elem )
             {
                 if ( bs == BALANCE_SOFT_AFFINITY &&
-                     !has_soft_affinity(wvc->vcpu->sched_unit) )
+                     !has_soft_affinity(wvc->unit) )
                     continue;
 
-                if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
+                if ( unit_check_affinity(wvc->unit, cpu, bs) )
                 {
-                    vcpu_assign(prv, wvc->vcpu, cpu);
+                    unit_assign(prv, wvc->unit, cpu);
                     list_del_init(&wvc->waitq_elem);
-                    ret.task = wvc->vcpu->sched_unit;
+                    ret.task = wvc->unit;
                     goto unlock;
                 }
             }
@@ -870,17 +866,17 @@ static struct task_slice null_schedule(const struct scheduler *ops,
     }
 
     if ( unlikely(ret.task == NULL || !unit_runnable(ret.task)) )
-        ret.task = idle_vcpu[cpu]->sched_unit;
+        ret.task = sched_idle_unit(cpu);
 
-    NULL_VCPU_CHECK(ret.task->vcpu_list);
+    NULL_UNIT_CHECK(ret.task);
     return ret;
 }
 
-static inline void dump_vcpu(struct null_private *prv, struct null_unit *nvc)
+static inline void dump_unit(struct null_private *prv, struct null_unit *nvc)
 {
-    printk("[%i.%i] pcpu=%d", nvc->vcpu->domain->domain_id,
-            nvc->vcpu->vcpu_id, list_empty(&nvc->waitq_elem) ?
-                                nvc->vcpu->processor : -1);
+    printk("[%i.%i] pcpu=%d", nvc->unit->domain->domain_id,
+            nvc->unit->unit_id, list_empty(&nvc->waitq_elem) ?
+                                sched_unit_cpu(nvc->unit) : -1);
 }
 
 static void null_dump_pcpu(const struct scheduler *ops, int cpu)
@@ -895,16 +891,17 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
     printk("CPU[%02d] sibling=%*pb, core=%*pb",
            cpu, CPUMASK_PR(per_cpu(cpu_sibling_mask, cpu)),
            CPUMASK_PR(per_cpu(cpu_core_mask, cpu)));
-    if ( per_cpu(npc, cpu).vcpu != NULL )
-        printk(", vcpu=%pv", per_cpu(npc, cpu).vcpu);
+    if ( per_cpu(npc, cpu).unit != NULL )
+        printk(", unit=%pdv%d", per_cpu(npc, cpu).unit->domain,
+               per_cpu(npc, cpu).unit->unit_id);
     printk("\n");
 
-    /* current VCPU (nothing to say if that's the idle vcpu) */
+    /* current unit (nothing to say if that's the idle unit) */
     nvc = null_unit(curr_on_cpu(cpu));
-    if ( nvc && !is_idle_vcpu(nvc->vcpu) )
+    if ( nvc && !is_idle_unit(nvc->unit) )
     {
         printk("\trun: ");
-        dump_vcpu(prv, nvc);
+        dump_unit(prv, nvc);
         printk("\n");
     }
 
@@ -927,23 +924,23 @@ static void null_dump(const struct scheduler *ops)
     list_for_each( iter, &prv->ndom )
     {
         struct null_dom *ndom;
-        struct vcpu *v;
+        struct sched_unit *unit;
 
         ndom = list_entry(iter, struct null_dom, ndom_elem);
 
         printk("\tDomain: %d\n", ndom->dom->domain_id);
-        for_each_vcpu( ndom->dom, v )
+        for_each_sched_unit( ndom->dom, unit )
         {
-            struct null_unit * const nvc = null_unit(v->sched_unit);
+            struct null_unit * const nvc = null_unit(unit);
             spinlock_t *lock;
 
-            lock = unit_schedule_lock(nvc->vcpu->sched_unit);
+            lock = unit_schedule_lock(unit);
 
             printk("\t%3d: ", ++loop);
-            dump_vcpu(prv, nvc);
+            dump_unit(prv, nvc);
             printk("\n");
 
-            unit_schedule_unlock(lock, nvc->vcpu->sched_unit);
+            unit_schedule_unlock(lock, unit);
         }
     }
 
@@ -958,7 +955,7 @@ static void null_dump(const struct scheduler *ops)
             printk(", ");
         if ( loop % 24 == 0 )
             printk("\n\t");
-        printk("%pv", nvc->vcpu);
+        printk("%pdv%d", nvc->unit->domain, nvc->unit->unit_id);
     }
     printk("\n");
     spin_unlock(&prv->waitq_lock);
-- 
2.16.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
