All of lore.kernel.org
 help / color / mirror / Atom feed
From: Dario Faggioli <dario.faggioli@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
	George Dunlap <george.dunlap@citrix.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v2 2/2] xen: sched: improve robustness (and rename) DOM2OP()
Date: Fri, 17 Mar 2017 19:19:44 +0100	[thread overview]
Message-ID: <148977478448.22479.13625390869019347980.stgit@Palanthas.fritz.box> (raw)
In-Reply-To: <148977465656.22479.5382577625088079334.stgit@Palanthas.fritz.box>

Clarify and enforce (with ASSERTs) when the function
is called on the idle domain, and explain in comments
what it means and when it is ok to do so.

While there, change the name of the function to a more
self-explanatory one, and do the same to VCPU2OP.

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
---
Cc: George Dunlap <george.dunlap@citrix.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Jan Beulich <jbeulich@suse.com>
---
Changes from v1:
 - new patch;
 - renamed VCPU2OP, as suggested during v1's review of patch 1.
---
 xen/common/schedule.c |   56 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 37 insertions(+), 19 deletions(-)

diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index d344b7c..fdb8ff4 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -77,8 +77,25 @@ static struct scheduler __read_mostly ops;
          (( (opsptr)->fn != NULL ) ? (opsptr)->fn(opsptr, ##__VA_ARGS__ )  \
           : (typeof((opsptr)->fn(opsptr, ##__VA_ARGS__)))0 )
 
-#define DOM2OP(_d)    (((_d)->cpupool == NULL) ? &ops : ((_d)->cpupool->sched))
-static inline struct scheduler *VCPU2OP(const struct vcpu *v)
+static inline struct scheduler *dom_get_scheduler(const struct domain *d)
+{
+    if ( likely(d->cpupool != NULL) )
+        return d->cpupool->sched;
+
+    /*
+     * If d->cpupool is NULL, this is the idle domain. This is special
+     * because the idle domain does not really belong to any cpupool, and,
+     * hence, does not really have a scheduler.
+     *
+     * This is (and should be!) only called like this when allocating the
+     * idle vCPUs for the first time, during boot, in which case what we
+     * want is the default scheduler that was chosen at boot.
+     */
+    ASSERT(is_idle_domain(d));
+    return &ops;
+}
+
+static inline struct scheduler *vcpu_get_scheduler(const struct vcpu *v)
 {
     struct domain *d = v->domain;
 
@@ -260,7 +277,8 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     init_timer(&v->poll_timer, poll_timer_fn,
                v, v->processor);
 
-    v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v, d->sched_priv);
+    v->sched_priv = SCHED_OP(dom_get_scheduler(d), alloc_vdata, v,
+		             d->sched_priv);
     if ( v->sched_priv == NULL )
         return 1;
 
@@ -272,7 +290,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     }
     else
     {
-        SCHED_OP(DOM2OP(d), insert_vcpu, v);
+        SCHED_OP(dom_get_scheduler(d), insert_vcpu, v);
     }
 
     return 0;
@@ -326,7 +344,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
     domain_pause(d);
 
-    old_ops = DOM2OP(d);
+    old_ops = dom_get_scheduler(d);
     old_domdata = d->sched_priv;
 
     for_each_vcpu ( d, v )
@@ -389,8 +407,8 @@ void sched_destroy_vcpu(struct vcpu *v)
     kill_timer(&v->poll_timer);
     if ( test_and_clear_bool(v->is_urgent) )
         atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
-    SCHED_OP(VCPU2OP(v), remove_vcpu, v);
-    SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv);
+    SCHED_OP(vcpu_get_scheduler(v), remove_vcpu, v);
+    SCHED_OP(vcpu_get_scheduler(v), free_vdata, v->sched_priv);
 }
 
 int sched_init_domain(struct domain *d, int poolid)
@@ -404,7 +422,7 @@ int sched_init_domain(struct domain *d, int poolid)
 
     SCHED_STAT_CRANK(dom_init);
     TRACE_1D(TRC_SCHED_DOM_ADD, d->domain_id);
-    return SCHED_OP(DOM2OP(d), init_domain, d);
+    return SCHED_OP(dom_get_scheduler(d), init_domain, d);
 }
 
 void sched_destroy_domain(struct domain *d)
@@ -413,7 +431,7 @@ void sched_destroy_domain(struct domain *d)
 
     SCHED_STAT_CRANK(dom_destroy);
     TRACE_1D(TRC_SCHED_DOM_REM, d->domain_id);
-    SCHED_OP(DOM2OP(d), destroy_domain, d);
+    SCHED_OP(dom_get_scheduler(d), destroy_domain, d);
 
     cpupool_rm_domain(d);
 }
@@ -432,7 +450,7 @@ void vcpu_sleep_nosync(struct vcpu *v)
         if ( v->runstate.state == RUNSTATE_runnable )
             vcpu_runstate_change(v, RUNSTATE_offline, NOW());
 
-        SCHED_OP(VCPU2OP(v), sleep, v);
+        SCHED_OP(vcpu_get_scheduler(v), sleep, v);
     }
 
     vcpu_schedule_unlock_irqrestore(lock, flags, v);
@@ -461,7 +479,7 @@ void vcpu_wake(struct vcpu *v)
     {
         if ( v->runstate.state >= RUNSTATE_blocked )
             vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
-        SCHED_OP(VCPU2OP(v), wake, v);
+        SCHED_OP(vcpu_get_scheduler(v), wake, v);
     }
     else if ( !(v->pause_flags & VPF_blocked) )
     {
@@ -516,8 +534,8 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
      * Actual CPU switch to new CPU.  This is safe because the lock
      * pointer cant' change while the current lock is held.
      */
-    if ( VCPU2OP(v)->migrate )
-        SCHED_OP(VCPU2OP(v), migrate, v, new_cpu);
+    if ( vcpu_get_scheduler(v)->migrate )
+        SCHED_OP(vcpu_get_scheduler(v), migrate, v, new_cpu);
     else
         v->processor = new_cpu;
 }
@@ -583,7 +601,7 @@ static void vcpu_migrate(struct vcpu *v)
                 break;
 
             /* Select a new CPU. */
-            new_cpu = SCHED_OP(VCPU2OP(v), pick_cpu, v);
+            new_cpu = SCHED_OP(vcpu_get_scheduler(v), pick_cpu, v);
             if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
@@ -685,7 +703,7 @@ void restore_vcpu_affinity(struct domain *d)
         spin_unlock_irq(lock);;
 
         lock = vcpu_schedule_lock_irq(v);
-        v->processor = SCHED_OP(VCPU2OP(v), pick_cpu, v);
+        v->processor = SCHED_OP(vcpu_get_scheduler(v), pick_cpu, v);
         spin_unlock_irq(lock);
     }
 
@@ -975,7 +993,7 @@ long vcpu_yield(void)
     struct vcpu * v=current;
     spinlock_t *lock = vcpu_schedule_lock_irq(v);
 
-    SCHED_OP(VCPU2OP(v), yield, v);
+    SCHED_OP(vcpu_get_scheduler(v), yield, v);
     vcpu_schedule_unlock_irq(lock, v);
 
     SCHED_STAT_CRANK(vcpu_yield);
@@ -1288,7 +1306,7 @@ long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
     if ( ret )
         return ret;
 
-    if ( op->sched_id != DOM2OP(d)->sched_id )
+    if ( op->sched_id != dom_get_scheduler(d)->sched_id )
         return -EINVAL;
 
     switch ( op->cmd )
@@ -1304,7 +1322,7 @@ long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
 
     /* NB: the pluggable scheduler code needs to take care
      * of locking by itself. */
-    if ( (ret = SCHED_OP(DOM2OP(d), adjust, d, op)) == 0 )
+    if ( (ret = SCHED_OP(dom_get_scheduler(d), adjust, d, op)) == 0 )
         TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);
 
     return ret;
@@ -1482,7 +1500,7 @@ void context_saved(struct vcpu *prev)
     /* Check for migration request /after/ clearing running flag. */
     smp_mb();
 
-    SCHED_OP(VCPU2OP(prev), context_saved, prev);
+    SCHED_OP(vcpu_get_scheduler(prev), context_saved, prev);
 
     if ( unlikely(prev->pause_flags & VPF_migrating) )
         vcpu_migrate(prev);


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

  parent reply	other threads:[~2017-03-17 18:19 UTC|newest]

Thread overview: 14+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-03-17 18:19 [PATCH v2 0/2] xen: sched: don't call hooks of the wrong scheduler via VCPU2OP Dario Faggioli
2017-03-17 18:19 ` [PATCH v2 1/2] " Dario Faggioli
2017-03-17 18:27   ` Juergen Gross
2017-03-17 18:29   ` Juergen Gross
2017-03-20  8:01     ` Jan Beulich
2017-03-27 11:48   ` George Dunlap
2017-03-27 11:49   ` George Dunlap
2017-03-27 15:07     ` Dario Faggioli
2017-03-17 18:19 ` Dario Faggioli [this message]
2017-03-27 13:23   ` [PATCH v2 2/2] xen: sched: improve robustness (and rename) DOM2OP() George Dunlap
2017-03-27 15:13     ` Dario Faggioli
2017-04-06 10:59     ` Dario Faggioli
2017-04-06 11:06       ` Juergen Gross
2017-04-07  0:31         ` Dario Faggioli

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=148977478448.22479.13625390869019347980.stgit@Palanthas.fritz.box \
    --to=dario.faggioli@citrix.com \
    --cc=george.dunlap@citrix.com \
    --cc=jbeulich@suse.com \
    --cc=jgross@suse.com \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.