From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>, Tim Deegan <tim@xen.org>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Wei Liu <wei.liu2@citrix.com>,
	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
	George Dunlap <george.dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	Dario Faggioli <dfaggioli@suse.com>,
	Julien Grall <julien.grall@arm.com>,
	Meng Xu <mengxu@cis.upenn.edu>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH RFC V2 10/45] xen/sched: switch vcpu_schedule_lock to item_schedule_lock
Date: Mon,  6 May 2019 08:56:09 +0200	[thread overview]
Message-ID: <20190506065644.7415-11-jgross@suse.com> (raw)
In-Reply-To: <20190506065644.7415-1-jgross@suse.com>

Rename vcpu_schedule_[un]lock[_irq]() to item_schedule_[un]lock[_irq]()
and let these functions take a sched_item pointer instead of a vcpu
pointer as their parameter.
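
The conversion in callers is mechanical: where only a struct vcpu is at
hand, pass v->sched_item instead of v. A minimal before/after sketch of
the calling convention (the variable v here is illustrative, not quoted
from a specific hunk below):

    /* before */
    spinlock_t *lock = vcpu_schedule_lock_irq(v);
    /* ... critical section ... */
    vcpu_schedule_unlock_irq(lock, v);

    /* after */
    spinlock_t *lock = item_schedule_lock_irq(v->sched_item);
    /* ... critical section ... */
    item_schedule_unlock_irq(lock, v->sched_item);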

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/sched_credit.c  | 17 +++++++++--------
 xen/common/sched_credit2.c | 40 +++++++++++++++++++--------------------
 xen/common/sched_null.c    | 14 +++++++-------
 xen/common/sched_rt.c      | 15 +++++++--------
 xen/common/schedule.c      | 47 +++++++++++++++++++++++-----------------------
 xen/include/xen/sched-if.h | 12 ++++++------
 6 files changed, 73 insertions(+), 72 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index e8369b3648..de4face2bc 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -940,7 +940,8 @@ __csched_vcpu_acct_stop_locked(struct csched_private *prv,
 static void
 csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
 {
-    struct csched_item * const svc = CSCHED_ITEM(current->sched_item);
+    struct sched_item *curritem = current->sched_item;
+    struct csched_item * const svc = CSCHED_ITEM(curritem);
     const struct scheduler *ops = per_cpu(scheduler, cpu);
 
     ASSERT( current->processor == cpu );
@@ -976,7 +977,7 @@ csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
     {
         unsigned int new_cpu;
         unsigned long flags;
-        spinlock_t *lock = vcpu_schedule_lock_irqsave(current, &flags);
+        spinlock_t *lock = item_schedule_lock_irqsave(curritem, &flags);
 
         /*
          * If it's been active a while, check if we'd be better off
@@ -985,7 +986,7 @@ csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
          */
         new_cpu = _csched_cpu_pick(ops, current, 0);
 
-        vcpu_schedule_unlock_irqrestore(lock, flags, current);
+        item_schedule_unlock_irqrestore(lock, flags, curritem);
 
         if ( new_cpu != cpu )
         {
@@ -1037,19 +1038,19 @@ csched_item_insert(const struct scheduler *ops, struct sched_item *item)
     BUG_ON( is_idle_vcpu(vc) );
 
     /* csched_res_pick() looks in vc->processor's runq, so we need the lock. */
-    lock = vcpu_schedule_lock_irq(vc);
+    lock = item_schedule_lock_irq(item);
 
     item->res = csched_res_pick(ops, item);
     vc->processor = item->res->processor;
 
     spin_unlock_irq(lock);
 
-    lock = vcpu_schedule_lock_irq(vc);
+    lock = item_schedule_lock_irq(item);
 
     if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vc->is_running )
         runq_insert(svc);
 
-    vcpu_schedule_unlock_irq(lock, vc);
+    item_schedule_unlock_irq(lock, item);
 
     SCHED_STAT_CRANK(vcpu_insert);
 }
@@ -2145,12 +2146,12 @@ csched_dump(const struct scheduler *ops)
             spinlock_t *lock;
 
             svc = list_entry(iter_svc, struct csched_item, active_vcpu_elem);
-            lock = vcpu_schedule_lock(svc->vcpu);
+            lock = item_schedule_lock(svc->vcpu->sched_item);
 
             printk("\t%3d: ", ++loop);
             csched_dump_vcpu(svc);
 
-            vcpu_schedule_unlock(lock, svc->vcpu);
+            item_schedule_unlock(lock, svc->vcpu->sched_item);
         }
     }
 
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index df0e7282ce..6106293b3f 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -171,7 +171,7 @@
  * - runqueue lock
  *  + it is per-runqueue, so:
  *   * cpus in a runqueue take the runqueue lock, when using
- *     pcpu_schedule_lock() / vcpu_schedule_lock() (and friends),
+ *     pcpu_schedule_lock() / item_schedule_lock() (and friends),
  *   * a cpu may (try to) take a "remote" runqueue lock, e.g., for
  *     load balancing;
  *  + serializes runqueue operations (removing and inserting vcpus);
@@ -1890,7 +1890,7 @@ unpark_parked_vcpus(const struct scheduler *ops, struct list_head *vcpus)
         unsigned long flags;
         s_time_t now;
 
-        lock = vcpu_schedule_lock_irqsave(svc->vcpu, &flags);
+        lock = item_schedule_lock_irqsave(svc->vcpu->sched_item, &flags);
 
         __clear_bit(_VPF_parked, &svc->vcpu->pause_flags);
         if ( unlikely(svc->flags & CSFLAG_scheduled) )
@@ -1923,7 +1923,7 @@ unpark_parked_vcpus(const struct scheduler *ops, struct list_head *vcpus)
         }
         list_del_init(&svc->parked_elem);
 
-        vcpu_schedule_unlock_irqrestore(lock, flags, svc->vcpu);
+        item_schedule_unlock_irqrestore(lock, flags, svc->vcpu->sched_item);
     }
 }
 
@@ -2162,7 +2162,7 @@ csched2_context_saved(const struct scheduler *ops, struct sched_item *item)
 {
     struct vcpu *vc = item->vcpu;
     struct csched2_item * const svc = csched2_item(item);
-    spinlock_t *lock = vcpu_schedule_lock_irq(vc);
+    spinlock_t *lock = item_schedule_lock_irq(item);
     s_time_t now = NOW();
     LIST_HEAD(were_parked);
 
@@ -2194,7 +2194,7 @@ csched2_context_saved(const struct scheduler *ops, struct sched_item *item)
     else if ( !is_idle_vcpu(vc) )
         update_load(ops, svc->rqd, svc, -1, now);
 
-    vcpu_schedule_unlock_irq(lock, vc);
+    item_schedule_unlock_irq(lock, item);
 
     unpark_parked_vcpus(ops, &were_parked);
 }
@@ -2847,14 +2847,14 @@ csched2_dom_cntl(
             for_each_vcpu ( d, v )
             {
                 struct csched2_item *svc = csched2_item(v->sched_item);
-                spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
+                spinlock_t *lock = item_schedule_lock(svc->vcpu->sched_item);
 
                 ASSERT(svc->rqd == c2rqd(ops, svc->vcpu->processor));
 
                 svc->weight = sdom->weight;
                 update_max_weight(svc->rqd, svc->weight, old_weight);
 
-                vcpu_schedule_unlock(lock, svc->vcpu);
+                item_schedule_unlock(lock, svc->vcpu->sched_item);
             }
         }
         /* Cap */
@@ -2885,7 +2885,7 @@ csched2_dom_cntl(
             for_each_vcpu ( d, v )
             {
                 svc = csched2_item(v->sched_item);
-                lock = vcpu_schedule_lock(svc->vcpu);
+                lock = item_schedule_lock(svc->vcpu->sched_item);
                 /*
                  * Too small quotas would in theory cause a lot of overhead,
                  * which then won't happen because, in csched2_runtime(),
@@ -2893,7 +2893,7 @@ csched2_dom_cntl(
                  */
                 svc->budget_quota = max(sdom->tot_budget / sdom->nr_vcpus,
                                         CSCHED2_MIN_TIMER);
-                vcpu_schedule_unlock(lock, svc->vcpu);
+                item_schedule_unlock(lock, svc->vcpu->sched_item);
             }
 
             if ( sdom->cap == 0 )
@@ -2928,7 +2928,7 @@ csched2_dom_cntl(
                 for_each_vcpu ( d, v )
                 {
                     svc = csched2_item(v->sched_item);
-                    lock = vcpu_schedule_lock(svc->vcpu);
+                    lock = item_schedule_lock(svc->vcpu->sched_item);
                     if ( v->is_running )
                     {
                         unsigned int cpu = v->processor;
@@ -2959,7 +2959,7 @@ csched2_dom_cntl(
                         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
                     }
                     svc->budget = 0;
-                    vcpu_schedule_unlock(lock, svc->vcpu);
+                    item_schedule_unlock(lock, svc->vcpu->sched_item);
                 }
             }
 
@@ -2975,12 +2975,12 @@ csched2_dom_cntl(
             for_each_vcpu ( d, v )
             {
                 struct csched2_item *svc = csched2_item(v->sched_item);
-                spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
+                spinlock_t *lock = item_schedule_lock(svc->vcpu->sched_item);
 
                 svc->budget = STIME_MAX;
                 svc->budget_quota = 0;
 
-                vcpu_schedule_unlock(lock, svc->vcpu);
+                item_schedule_unlock(lock, svc->vcpu->sched_item);
             }
             sdom->cap = 0;
             /*
@@ -3119,19 +3119,19 @@ csched2_item_insert(const struct scheduler *ops, struct sched_item *item)
     ASSERT(list_empty(&svc->runq_elem));
 
     /* csched2_res_pick() expects the pcpu lock to be held */
-    lock = vcpu_schedule_lock_irq(vc);
+    lock = item_schedule_lock_irq(item);
 
     item->res = csched2_res_pick(ops, item);
     vc->processor = item->res->processor;
 
     spin_unlock_irq(lock);
 
-    lock = vcpu_schedule_lock_irq(vc);
+    lock = item_schedule_lock_irq(item);
 
     /* Add vcpu to runqueue of initial processor */
     runq_assign(ops, vc);
 
-    vcpu_schedule_unlock_irq(lock, vc);
+    item_schedule_unlock_irq(lock, item);
 
     sdom->nr_vcpus++;
 
@@ -3161,11 +3161,11 @@ csched2_item_remove(const struct scheduler *ops, struct sched_item *item)
     SCHED_STAT_CRANK(vcpu_remove);
 
     /* Remove from runqueue */
-    lock = vcpu_schedule_lock_irq(vc);
+    lock = item_schedule_lock_irq(item);
 
     runq_deassign(ops, vc);
 
-    vcpu_schedule_unlock_irq(lock, vc);
+    item_schedule_unlock_irq(lock, item);
 
     svc->sdom->nr_vcpus--;
 }
@@ -3749,12 +3749,12 @@ csched2_dump(const struct scheduler *ops)
             struct csched2_item * const svc = csched2_item(v->sched_item);
             spinlock_t *lock;
 
-            lock = vcpu_schedule_lock(svc->vcpu);
+            lock = item_schedule_lock(svc->vcpu->sched_item);
 
             printk("\t%3d: ", ++loop);
             csched2_dump_vcpu(prv, svc);
 
-            vcpu_schedule_unlock(lock, svc->vcpu);
+            item_schedule_unlock(lock, svc->vcpu->sched_item);
         }
     }
 
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index a9cfa163b9..620925e8ce 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -317,7 +317,7 @@ pick_res(struct null_private *prv, struct sched_item *item)
      * all the pCPUs are busy.
      *
      * In fact, there must always be something sane in v->processor, or
-     * vcpu_schedule_lock() and friends won't work. This is not a problem,
+     * item_schedule_lock() and friends won't work. This is not a problem,
      * as we will actually assign the vCPU to the pCPU we return from here,
      * only if the pCPU is free.
      */
@@ -428,7 +428,7 @@ static void null_item_insert(const struct scheduler *ops,
 
     ASSERT(!is_idle_vcpu(v));
 
-    lock = vcpu_schedule_lock_irq(v);
+    lock = item_schedule_lock_irq(item);
  retry:
 
     item->res = pick_res(prv, item);
@@ -436,7 +436,7 @@ static void null_item_insert(const struct scheduler *ops,
 
     spin_unlock(lock);
 
-    lock = vcpu_schedule_lock(v);
+    lock = item_schedule_lock(item);
 
     cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
                 cpupool_domain_cpumask(v->domain));
@@ -522,7 +522,7 @@ static void null_item_remove(const struct scheduler *ops,
 
     ASSERT(!is_idle_vcpu(v));
 
-    lock = vcpu_schedule_lock_irq(v);
+    lock = item_schedule_lock_irq(item);
 
     /* If v is in waitqueue, just get it out of there and bail */
     if ( unlikely(!list_empty(&nvc->waitq_elem)) )
@@ -540,7 +540,7 @@ static void null_item_remove(const struct scheduler *ops,
     _vcpu_remove(prv, v);
 
  out:
-    vcpu_schedule_unlock_irq(lock, v);
+    item_schedule_unlock_irq(lock, item);
 
     SCHED_STAT_CRANK(vcpu_remove);
 }
@@ -860,13 +860,13 @@ static void null_dump(const struct scheduler *ops)
             struct null_item * const nvc = null_item(v->sched_item);
             spinlock_t *lock;
 
-            lock = vcpu_schedule_lock(nvc->vcpu);
+            lock = item_schedule_lock(nvc->vcpu->sched_item);
 
             printk("\t%3d: ", ++loop);
             dump_vcpu(prv, nvc);
             printk("\n");
 
-            vcpu_schedule_unlock(lock, nvc->vcpu);
+            item_schedule_unlock(lock, nvc->vcpu->sched_item);
         }
     }
 
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 0019646b52..a604a0d5a6 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -177,7 +177,7 @@ static void repl_timer_handler(void *data);
 /*
  * System-wide private data, include global RunQueue/DepletedQ
  * Global lock is referenced by sched_res->schedule_lock from all
- * physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
+ * physical cpus. It can be grabbed via item_schedule_lock_irq()
  */
 struct rt_private {
     spinlock_t lock;            /* the global coarse-grained lock */
@@ -904,7 +904,7 @@ rt_item_insert(const struct scheduler *ops, struct sched_item *item)
     item->res = rt_res_pick(ops, item);
     vc->processor = item->res->processor;
 
-    lock = vcpu_schedule_lock_irq(vc);
+    lock = item_schedule_lock_irq(item);
 
     now = NOW();
     if ( now >= svc->cur_deadline )
@@ -917,7 +917,7 @@ rt_item_insert(const struct scheduler *ops, struct sched_item *item)
         if ( !vc->is_running )
             runq_insert(ops, svc);
     }
-    vcpu_schedule_unlock_irq(lock, vc);
+    item_schedule_unlock_irq(lock, item);
 
     SCHED_STAT_CRANK(vcpu_insert);
 }
@@ -928,7 +928,6 @@ rt_item_insert(const struct scheduler *ops, struct sched_item *item)
 static void
 rt_item_remove(const struct scheduler *ops, struct sched_item *item)
 {
-    struct vcpu *vc = item->vcpu;
     struct rt_item * const svc = rt_item(item);
     struct rt_dom * const sdom = svc->sdom;
     spinlock_t *lock;
@@ -937,14 +936,14 @@ rt_item_remove(const struct scheduler *ops, struct sched_item *item)
 
     BUG_ON( sdom == NULL );
 
-    lock = vcpu_schedule_lock_irq(vc);
+    lock = item_schedule_lock_irq(item);
     if ( vcpu_on_q(svc) )
         q_remove(svc);
 
     if ( vcpu_on_replq(svc) )
         replq_remove(ops,svc);
 
-    vcpu_schedule_unlock_irq(lock, vc);
+    item_schedule_unlock_irq(lock, item);
 }
 
 /*
@@ -1339,7 +1338,7 @@ rt_context_saved(const struct scheduler *ops, struct sched_item *item)
 {
     struct vcpu *vc = item->vcpu;
     struct rt_item *svc = rt_item(item);
-    spinlock_t *lock = vcpu_schedule_lock_irq(vc);
+    spinlock_t *lock = item_schedule_lock_irq(item);
 
     __clear_bit(__RTDS_scheduled, &svc->flags);
     /* not insert idle vcpu to runq */
@@ -1356,7 +1355,7 @@ rt_context_saved(const struct scheduler *ops, struct sched_item *item)
         replq_remove(ops, svc);
 
 out:
-    vcpu_schedule_unlock_irq(lock, vc);
+    item_schedule_unlock_irq(lock, item);
 }
 
 /*
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 51db98bcaa..464e358f70 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -194,7 +194,8 @@ static inline void vcpu_runstate_change(
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
 {
-    spinlock_t *lock = likely(v == current) ? NULL : vcpu_schedule_lock_irq(v);
+    spinlock_t *lock = likely(v == current)
+                       ? NULL : item_schedule_lock_irq(v->sched_item);
     s_time_t delta;
 
     memcpy(runstate, &v->runstate, sizeof(*runstate));
@@ -203,7 +204,7 @@ void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
         runstate->time[runstate->state] += delta;
 
     if ( unlikely(lock != NULL) )
-        vcpu_schedule_unlock_irq(lock, v);
+        item_schedule_unlock_irq(lock, v->sched_item);
 }
 
 uint64_t get_cpu_idle_time(unsigned int cpu)
@@ -415,7 +416,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
         migrate_timer(&v->singleshot_timer, new_p);
         migrate_timer(&v->poll_timer, new_p);
 
-        lock = vcpu_schedule_lock_irq(v);
+        lock = item_schedule_lock_irq(v->sched_item);
 
         sched_set_affinity(v, &cpumask_all, &cpumask_all);
 
@@ -424,7 +425,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
         /*
          * With v->processor modified we must not
          * - make any further changes assuming we hold the scheduler lock,
-         * - use vcpu_schedule_unlock_irq().
+         * - use item_schedule_unlock_irq().
          */
         spin_unlock_irq(lock);
 
@@ -523,11 +524,11 @@ void vcpu_sleep_nosync(struct vcpu *v)
 
     TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
 
-    lock = vcpu_schedule_lock_irqsave(v, &flags);
+    lock = item_schedule_lock_irqsave(v->sched_item, &flags);
 
     vcpu_sleep_nosync_locked(v);
 
-    vcpu_schedule_unlock_irqrestore(lock, flags, v);
+    item_schedule_unlock_irqrestore(lock, flags, v->sched_item);
 }
 
 void vcpu_sleep_sync(struct vcpu *v)
@@ -547,7 +548,7 @@ void vcpu_wake(struct vcpu *v)
 
     TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
 
-    lock = vcpu_schedule_lock_irqsave(v, &flags);
+    lock = item_schedule_lock_irqsave(v->sched_item, &flags);
 
     if ( likely(vcpu_runnable(v)) )
     {
@@ -561,7 +562,7 @@ void vcpu_wake(struct vcpu *v)
             vcpu_runstate_change(v, RUNSTATE_offline, NOW());
     }
 
-    vcpu_schedule_unlock_irqrestore(lock, flags, v);
+    item_schedule_unlock_irqrestore(lock, flags, v->sched_item);
 }
 
 void vcpu_unblock(struct vcpu *v)
@@ -629,9 +630,9 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
  * These steps are encapsulated in the following two functions; they
  * should be called like this:
  *
- *     lock = vcpu_schedule_lock_irq(v);
+ *     lock = item_schedule_lock_irq(item);
  *     vcpu_migrate_start(v);
- *     vcpu_schedule_unlock_irq(lock, v)
+ *     item_schedule_unlock_irq(lock, item)
  *     vcpu_migrate_finish(v);
  *
  * vcpu_migrate_finish() will do the work now if it can, or simply
@@ -736,12 +737,12 @@ static void vcpu_migrate_finish(struct vcpu *v)
  */
 void vcpu_force_reschedule(struct vcpu *v)
 {
-    spinlock_t *lock = vcpu_schedule_lock_irq(v);
+    spinlock_t *lock = item_schedule_lock_irq(v->sched_item);
 
     if ( v->is_running )
         vcpu_migrate_start(v);
 
-    vcpu_schedule_unlock_irq(lock, v);
+    item_schedule_unlock_irq(lock, v->sched_item);
 
     vcpu_migrate_finish(v);
 }
@@ -792,7 +793,7 @@ void restore_vcpu_affinity(struct domain *d)
         v->processor = cpumask_any(cpumask_scratch_cpu(cpu));
         v->sched_item->res = per_cpu(sched_res, v->processor);
 
-        lock = vcpu_schedule_lock_irq(v);
+        lock = item_schedule_lock_irq(v->sched_item);
         v->sched_item->res = sched_pick_resource(vcpu_scheduler(v),
                                                  v->sched_item);
         v->processor = v->sched_item->res->processor;
@@ -827,7 +828,7 @@ int cpu_disable_scheduler(unsigned int cpu)
         for_each_vcpu ( d, v )
         {
             unsigned long flags;
-            spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
+            spinlock_t *lock = item_schedule_lock_irqsave(v->sched_item, &flags);
 
             cpumask_and(&online_affinity, v->cpu_hard_affinity, c->cpu_valid);
             if ( cpumask_empty(&online_affinity) &&
@@ -836,7 +837,7 @@ int cpu_disable_scheduler(unsigned int cpu)
                 if ( v->affinity_broken )
                 {
                     /* The vcpu is temporarily pinned, can't move it. */
-                    vcpu_schedule_unlock_irqrestore(lock, flags, v);
+                    item_schedule_unlock_irqrestore(lock, flags, v->sched_item);
                     ret = -EADDRINUSE;
                     break;
                 }
@@ -849,7 +850,7 @@ int cpu_disable_scheduler(unsigned int cpu)
             if ( v->processor != cpu )
             {
                 /* The vcpu is not on this cpu, so we can move on. */
-                vcpu_schedule_unlock_irqrestore(lock, flags, v);
+                item_schedule_unlock_irqrestore(lock, flags, v->sched_item);
                 continue;
             }
 
@@ -862,7 +863,7 @@ int cpu_disable_scheduler(unsigned int cpu)
              *    things would have failed before getting in here.
              */
             vcpu_migrate_start(v);
-            vcpu_schedule_unlock_irqrestore(lock, flags, v);
+            item_schedule_unlock_irqrestore(lock, flags, v->sched_item);
 
             vcpu_migrate_finish(v);
 
@@ -926,7 +927,7 @@ static int vcpu_set_affinity(
     spinlock_t *lock;
     int ret = 0;
 
-    lock = vcpu_schedule_lock_irq(v);
+    lock = item_schedule_lock_irq(v->sched_item);
 
     if ( v->affinity_broken )
         ret = -EBUSY;
@@ -948,7 +949,7 @@ static int vcpu_set_affinity(
         vcpu_migrate_start(v);
     }
 
-    vcpu_schedule_unlock_irq(lock, v);
+    item_schedule_unlock_irq(lock, v->sched_item);
 
     domain_update_node_affinity(v->domain);
 
@@ -1080,10 +1081,10 @@ static long do_poll(struct sched_poll *sched_poll)
 long vcpu_yield(void)
 {
     struct vcpu * v=current;
-    spinlock_t *lock = vcpu_schedule_lock_irq(v);
+    spinlock_t *lock = item_schedule_lock_irq(v->sched_item);
 
     sched_yield(vcpu_scheduler(v), v->sched_item);
-    vcpu_schedule_unlock_irq(lock, v);
+    item_schedule_unlock_irq(lock, v->sched_item);
 
     SCHED_STAT_CRANK(vcpu_yield);
 
@@ -1169,7 +1170,7 @@ int vcpu_pin_override(struct vcpu *v, int cpu)
     spinlock_t *lock;
     int ret = -EINVAL;
 
-    lock = vcpu_schedule_lock_irq(v);
+    lock = item_schedule_lock_irq(v->sched_item);
 
     if ( cpu < 0 )
     {
@@ -1196,7 +1197,7 @@ int vcpu_pin_override(struct vcpu *v, int cpu)
     if ( ret == 0 )
         vcpu_migrate_start(v);
 
-    vcpu_schedule_unlock_irq(lock, v);
+    item_schedule_unlock_irq(lock, v->sched_item);
 
     domain_update_node_affinity(v->domain);
 
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 93617f0459..17f1ee8887 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -91,22 +91,22 @@ static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
 
 #define EXTRA_TYPE(arg)
 sched_lock(pcpu, unsigned int cpu,     cpu, )
-sched_lock(vcpu, const struct vcpu *v, v->processor, )
+sched_lock(item, const struct sched_item *i, i->res->processor, )
 sched_lock(pcpu, unsigned int cpu,     cpu,          _irq)
-sched_lock(vcpu, const struct vcpu *v, v->processor, _irq)
+sched_lock(item, const struct sched_item *i, i->res->processor, _irq)
 sched_unlock(pcpu, unsigned int cpu,     cpu, )
-sched_unlock(vcpu, const struct vcpu *v, v->processor, )
+sched_unlock(item, const struct sched_item *i, i->res->processor, )
 sched_unlock(pcpu, unsigned int cpu,     cpu,          _irq)
-sched_unlock(vcpu, const struct vcpu *v, v->processor, _irq)
+sched_unlock(item, const struct sched_item *i, i->res->processor, _irq)
 #undef EXTRA_TYPE
 
 #define EXTRA_TYPE(arg) , unsigned long arg
 #define spin_unlock_irqsave spin_unlock_irqrestore
 sched_lock(pcpu, unsigned int cpu,     cpu,          _irqsave, *flags)
-sched_lock(vcpu, const struct vcpu *v, v->processor, _irqsave, *flags)
+sched_lock(item, const struct sched_item *i, i->res->processor, _irqsave, *flags)
 #undef spin_unlock_irqsave
 sched_unlock(pcpu, unsigned int cpu,     cpu,          _irqrestore, flags)
-sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
+sched_unlock(item, const struct sched_item *i, i->res->processor, _irqrestore, flags)
 #undef EXTRA_TYPE
 
 #undef sched_unlock
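
For context: each sched_lock()/sched_unlock() line above expands to an
inline locking wrapper. A simplified sketch of what the new
item_schedule_lock_irq() variant expands to, reconstructed from the
general shape of these macros rather than quoted from the tree:

    static inline spinlock_t *item_schedule_lock_irq(const struct sched_item *i)
    {
        for ( ; ; )
        {
            spinlock_t *lock = per_cpu(sched_res, i->res->processor)->schedule_lock;

            spin_lock_irq(lock);
            /*
             * The per-cpu lock pointer may have changed while we were
             * waiting (e.g. the cpu moved to another runqueue), so
             * re-check and retry if it did.
             */
            if ( likely(lock == per_cpu(sched_res, i->res->processor)->schedule_lock) )
                return lock;
            spin_unlock_irq(lock);
        }
    }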
-- 
2.16.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
