From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
	Stefano Stabellini <sstabellini@kernel.org>, Wei Liu <wl@xen.org>,
	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
	George Dunlap <George.Dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>, Tim Deegan <tim@xen.org>,
	Julien Grall <julien.grall@arm.com>,
	Jan Beulich <jbeulich@suse.com>,
	Dario Faggioli <dfaggioli@suse.com>
Subject: [Xen-devel] [PATCH 30/60] xen: switch from for_each_vcpu() to for_each_sched_unit()
Date: Tue, 28 May 2019 12:32:43 +0200
Message-ID: <20190528103313.1343-31-jgross@suse.com>
In-Reply-To: <20190528103313.1343-1-jgross@suse.com>

Where appropriate, switch from for_each_vcpu() to for_each_sched_unit()
in order to prepare for core scheduling.
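
For reference, a minimal sketch of the iterators involved, as introduced
earlier in this series. This is an illustration only: the real definitions
live in xen/include/xen/sched.h, and the exact field names used here
(sched_unit_list, next_in_list, vcpu) are assumptions based on the
surrounding patches:

  /* Visit every vcpu of domain d. */
  #define for_each_vcpu(d, v)                        \
      for ( (v) = (d)->vcpu ? (d)->vcpu[0] : NULL;   \
            (v) != NULL;                             \
            (v) = (v)->next_in_list )

  /* Visit every scheduling unit of domain d. With core scheduling a
   * unit can group several vcpus scheduled as one entity. */
  #define for_each_sched_unit(d, u)                  \
      for ( (u) = (d)->sched_unit_list;              \
            (u) != NULL;                             \
            (u) = (u)->next_in_list )

  /* Visit the vcpus of one unit. Used below where per-vcpu state
   * (e.g. the timers in sched_move_domain()) still has to be
   * handled individually. */
  #define for_each_sched_unit_vcpu(u, v)             \
      for ( (v) = (u)->vcpu;                         \
            (v) != NULL && (v)->sched_unit == (u);   \
            (v) = (v)->next_in_list )

Note how sched_move_domain() below combines the two: the outer
for_each_sched_unit() loop assigns one scheduling resource per unit,
while the inner for_each_sched_unit_vcpu() loop still migrates the
per-vcpu timers to individual cpus.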

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/domain.c   |   9 ++---
 xen/common/schedule.c | 109 ++++++++++++++++++++++++++------------------------
 2 files changed, 60 insertions(+), 58 deletions(-)

diff --git a/xen/common/domain.c b/xen/common/domain.c
index 2d3427eb0f..f55ff06513 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -509,7 +509,7 @@ void domain_update_node_affinity(struct domain *d)
     cpumask_var_t dom_cpumask, dom_cpumask_soft;
     cpumask_t *dom_affinity;
     const cpumask_t *online;
-    struct vcpu *v;
+    struct sched_unit *unit;
     unsigned int cpu;
 
     /* Do we have vcpus already? If not, no need to update node-affinity. */
@@ -542,12 +542,11 @@ void domain_update_node_affinity(struct domain *d)
          * and the full mask of where it would prefer to run (the union of
          * the soft affinity of all its various vcpus). Let's build them.
          */
-        for_each_vcpu ( d, v )
+        for_each_sched_unit ( d, unit )
         {
-            cpumask_or(dom_cpumask, dom_cpumask,
-                       v->sched_unit->cpu_hard_affinity);
+            cpumask_or(dom_cpumask, dom_cpumask, unit->cpu_hard_affinity);
             cpumask_or(dom_cpumask_soft, dom_cpumask_soft,
-                       v->sched_unit->cpu_soft_affinity);
+                       unit->cpu_soft_affinity);
         }
         /* Filter out non-online cpus */
         cpumask_and(dom_cpumask, dom_cpumask, online);
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 266932fe25..63c9c0a8fb 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -433,16 +433,17 @@ static void sched_move_irqs(struct sched_unit *unit)
 int sched_move_domain(struct domain *d, struct cpupool *c)
 {
     struct vcpu *v;
+    struct sched_unit *unit;
     unsigned int new_p;
-    void **vcpu_priv;
+    void **unit_priv;
     void *domdata;
-    void *vcpudata;
+    void *unitdata;
     struct scheduler *old_ops;
     void *old_domdata;
 
-    for_each_vcpu ( d, v )
+    for_each_sched_unit ( d, unit )
     {
-        if ( v->sched_unit->affinity_broken )
+        if ( unit->affinity_broken )
             return -EBUSY;
     }
 
@@ -450,22 +451,21 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
     if ( IS_ERR(domdata) )
         return PTR_ERR(domdata);
 
-    vcpu_priv = xzalloc_array(void *, d->max_vcpus);
-    if ( vcpu_priv == NULL )
+    unit_priv = xzalloc_array(void *, d->max_vcpus);
+    if ( unit_priv == NULL )
     {
         sched_free_domdata(c->sched, domdata);
         return -ENOMEM;
     }
 
-    for_each_vcpu ( d, v )
+    for_each_sched_unit ( d, unit )
     {
-        vcpu_priv[v->vcpu_id] = sched_alloc_vdata(c->sched, v->sched_unit,
-                                                  domdata);
-        if ( vcpu_priv[v->vcpu_id] == NULL )
+        unit_priv[unit->unit_id] = sched_alloc_vdata(c->sched, unit, domdata);
+        if ( unit_priv[unit->unit_id] == NULL )
         {
-            for_each_vcpu ( d, v )
-                xfree(vcpu_priv[v->vcpu_id]);
-            xfree(vcpu_priv);
+            for_each_sched_unit ( d, unit )
+                xfree(unit_priv[unit->unit_id]);
+            xfree(unit_priv);
             sched_free_domdata(c->sched, domdata);
             return -ENOMEM;
         }
@@ -476,30 +476,35 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
     old_ops = dom_scheduler(d);
     old_domdata = d->sched_priv;
 
-    for_each_vcpu ( d, v )
+    for_each_sched_unit ( d, unit )
     {
-        sched_remove_unit(old_ops, v->sched_unit);
+        sched_remove_unit(old_ops, unit);
     }
 
     d->cpupool = c;
     d->sched_priv = domdata;
 
     new_p = cpumask_first(c->cpu_valid);
-    for_each_vcpu ( d, v )
+    for_each_sched_unit ( d, unit )
     {
         spinlock_t *lock;
+        unsigned int unit_p = new_p;
 
-        vcpudata = v->sched_unit->priv;
+        unitdata = unit->priv;
 
-        migrate_timer(&v->periodic_timer, new_p);
-        migrate_timer(&v->singleshot_timer, new_p);
-        migrate_timer(&v->poll_timer, new_p);
+        for_each_sched_unit_vcpu ( unit, v )
+        {
+            migrate_timer(&v->periodic_timer, new_p);
+            migrate_timer(&v->singleshot_timer, new_p);
+            migrate_timer(&v->poll_timer, new_p);
+            new_p = cpumask_cycle(new_p, c->cpu_valid);
+        }
 
-        lock = unit_schedule_lock_irq(v->sched_unit);
+        lock = unit_schedule_lock_irq(unit);
 
-        sched_set_affinity(v, &cpumask_all, &cpumask_all);
+        sched_set_affinity(unit->vcpu, &cpumask_all, &cpumask_all);
 
-        sched_set_res(v->sched_unit, get_sched_res(new_p));
+        sched_set_res(unit, get_sched_res(unit_p));
         /*
          * With v->processor modified we must not
          * - make any further changes assuming we hold the scheduler lock,
@@ -507,15 +512,13 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
          */
         spin_unlock_irq(lock);
 
-        v->sched_unit->priv = vcpu_priv[v->vcpu_id];
+        unit->priv = unit_priv[unit->unit_id];
         if ( !d->is_dying )
-            sched_move_irqs(v->sched_unit);
-
-        new_p = cpumask_cycle(new_p, c->cpu_valid);
+            sched_move_irqs(unit);
 
-        sched_insert_unit(c->sched, v->sched_unit);
+        sched_insert_unit(c->sched, unit);
 
-        sched_free_vdata(old_ops, vcpudata);
+        sched_free_vdata(old_ops, unitdata);
     }
 
     domain_update_node_affinity(d);
@@ -524,7 +527,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
     sched_free_domdata(old_ops, old_domdata);
 
-    xfree(vcpu_priv);
+    xfree(unit_priv);
 
     return 0;
 }
@@ -829,15 +832,14 @@ void vcpu_force_reschedule(struct vcpu *v)
 void restore_vcpu_affinity(struct domain *d)
 {
     unsigned int cpu = smp_processor_id();
-    struct vcpu *v;
+    struct sched_unit *unit;
 
     ASSERT(system_state == SYS_STATE_resume);
 
-    for_each_vcpu ( d, v )
+    for_each_sched_unit ( d, unit )
     {
         spinlock_t *lock;
-        unsigned int old_cpu = v->processor;
-        struct sched_unit *unit = v->sched_unit;
+        unsigned int old_cpu = sched_unit_cpu(unit);
         struct sched_resource *res;
 
         ASSERT(!unit_runnable(unit));
@@ -856,7 +858,8 @@ void restore_vcpu_affinity(struct domain *d)
         {
             if ( unit->affinity_broken )
             {
-                sched_set_affinity(v, unit->cpu_hard_affinity_saved, NULL);
+                sched_set_affinity(unit->vcpu, unit->cpu_hard_affinity_saved,
+                                   NULL);
                 unit->affinity_broken = 0;
                 cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
                             cpupool_domain_cpumask(d));
@@ -864,8 +867,8 @@ void restore_vcpu_affinity(struct domain *d)
 
             if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
             {
-                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
-                sched_set_affinity(v, &cpumask_all, NULL);
+                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", unit->vcpu);
+                sched_set_affinity(unit->vcpu, &cpumask_all, NULL);
                 cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
                             cpupool_domain_cpumask(d));
             }
@@ -875,12 +878,12 @@ void restore_vcpu_affinity(struct domain *d)
         sched_set_res(unit, res);
 
         lock = unit_schedule_lock_irq(unit);
-        res = sched_pick_resource(vcpu_scheduler(v), unit);
+        res = sched_pick_resource(vcpu_scheduler(unit->vcpu), unit);
         sched_set_res(unit, res);
         spin_unlock_irq(lock);
 
-        if ( old_cpu != v->processor )
-            sched_move_irqs(v->sched_unit);
+        if ( old_cpu != sched_unit_cpu(unit) )
+            sched_move_irqs(unit);
     }
 
     domain_update_node_affinity(d);
@@ -894,7 +897,6 @@ void restore_vcpu_affinity(struct domain *d)
 int cpu_disable_scheduler(unsigned int cpu)
 {
     struct domain *d;
-    struct vcpu *v;
     struct cpupool *c;
     cpumask_t online_affinity;
     int ret = 0;
@@ -905,10 +907,11 @@ int cpu_disable_scheduler(unsigned int cpu)
 
     for_each_domain_in_cpupool ( d, c )
     {
-        for_each_vcpu ( d, v )
+        struct sched_unit *unit;
+
+        for_each_sched_unit ( d, unit )
         {
             unsigned long flags;
-            struct sched_unit *unit = v->sched_unit;
             spinlock_t *lock = unit_schedule_lock_irqsave(unit, &flags);
 
             cpumask_and(&online_affinity, unit->cpu_hard_affinity, c->cpu_valid);
@@ -923,14 +926,14 @@ int cpu_disable_scheduler(unsigned int cpu)
                     break;
                 }
 
-                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
+                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", unit->vcpu);
 
-                sched_set_affinity(v, &cpumask_all, NULL);
+                sched_set_affinity(unit->vcpu, &cpumask_all, NULL);
             }
 
-            if ( v->processor != cpu )
+            if ( sched_unit_cpu(unit) != sched_get_resource_cpu(cpu) )
             {
-                /* The vcpu is not on this cpu, so we can move on. */
+                /* The unit is not on this cpu, so we can move on. */
                 unit_schedule_unlock_irqrestore(lock, flags, unit);
                 continue;
             }
@@ -943,17 +946,17 @@ int cpu_disable_scheduler(unsigned int cpu)
              *  * the scheduler will always find a suitable solution, or
              *    things would have failed before getting in here.
              */
-            vcpu_migrate_start(v);
+            vcpu_migrate_start(unit->vcpu);
             unit_schedule_unlock_irqrestore(lock, flags, unit);
 
-            vcpu_migrate_finish(v);
+            vcpu_migrate_finish(unit->vcpu);
 
             /*
             * The only caveat, in this case, is if a vcpu active in
             * the hypervisor isn't migratable; the caller should then
             * try again after releasing and reacquiring all locks.
              */
-            if ( v->processor == cpu )
+            if ( sched_unit_cpu(unit) == sched_get_resource_cpu(cpu) )
                 ret = -EAGAIN;
         }
     }
@@ -964,16 +967,16 @@ int cpu_disable_scheduler(unsigned int cpu)
 static int cpu_disable_scheduler_check(unsigned int cpu)
 {
     struct domain *d;
-    struct vcpu *v;
     struct cpupool *c;
+    struct sched_unit *unit;
 
     c = per_cpu(cpupool, cpu);
     if ( c == NULL )
         return 0;
 
     for_each_domain_in_cpupool ( d, c )
-        for_each_vcpu ( d, v )
-            if ( v->sched_unit->affinity_broken )
+        for_each_sched_unit ( d, unit )
+            if ( unit->affinity_broken )
                 return -EADDRINUSE;
 
     return 0;
-- 
2.16.4

