From: George Dunlap <george.dunlap@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Dario Faggioli <dario.faggioli@citrix.com>,
	George Dunlap <george.dunlap@citrix.com>,
	Anshul Makkar <anshul.makkar@citrix.com>,
	Meng Xu <mengxu@cis.upenn.edu>
Subject: [PATCH 1/3] xen: Some code motion to avoid having to do forward-declaration
Date: Fri, 15 Jul 2016 19:02:00 +0100
Message-ID: <1468605722-24239-1-git-send-email-george.dunlap@citrix.com>

For sched_credit2.c, move the vcpu insert / remove / free functions near the domain
insert / remove / alloc / free functions (and after cpu_pick).

For sched_rt.c, move rt_cpu_pick() further up.

This is pure code motion; no functional change.
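
As a purely illustrative sketch (not part of the patch; the function
names are hypothetical), this is the pattern being avoided: when a
callee is defined after its caller, C needs a forward declaration,
which moving the definition up makes unnecessary:

    /* Before: pick() is called before it is defined, so a
     * forward declaration is required. */
    static int pick(int cpu);           /* forward declaration */

    static void insert(int cpu)
    {
        pick(cpu);
    }

    static int pick(int cpu)
    {
        return cpu;
    }

    /* After moving the definition of pick() above insert(),
     * the forward declaration can simply be dropped. */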

Signed-off-by: George Dunlap <george.dunlap@citrix.com>
---
CC: Dario Faggioli <dario.faggioli@citrix.com>
CC: Anshul Makkar <anshul.makkar@citrix.com>
CC: Meng Xu <mengxu@cis.upenn.edu>
---
 xen/common/sched_credit2.c | 118 ++++++++++++++++++++++-----------------------
 xen/common/sched_rt.c      |  46 +++++++++---------
 2 files changed, 82 insertions(+), 82 deletions(-)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 8b95a47..3b9aa27 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -971,65 +971,6 @@ runq_deassign(const struct scheduler *ops, struct vcpu *vc)
 }
 
 static void
-csched2_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
-{
-    struct csched2_vcpu *svc = vc->sched_priv;
-    struct csched2_dom * const sdom = svc->sdom;
-    spinlock_t *lock;
-
-    printk("%s: Inserting %pv\n", __func__, vc);
-
-    BUG_ON(is_idle_vcpu(vc));
-
-    /* Add vcpu to runqueue of initial processor */
-    lock = vcpu_schedule_lock_irq(vc);
-
-    runq_assign(ops, vc);
-
-    vcpu_schedule_unlock_irq(lock, vc);
-
-    sdom->nr_vcpus++;
-
-    SCHED_STAT_CRANK(vcpu_insert);
-
-    CSCHED2_VCPU_CHECK(vc);
-}
-
-static void
-csched2_free_vdata(const struct scheduler *ops, void *priv)
-{
-    struct csched2_vcpu *svc = priv;
-
-    xfree(svc);
-}
-
-static void
-csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
-{
-    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
-    struct csched2_dom * const sdom = svc->sdom;
-
-    BUG_ON( sdom == NULL );
-    BUG_ON( !list_empty(&svc->runq_elem) );
-
-    if ( ! is_idle_vcpu(vc) )
-    {
-        spinlock_t *lock;
-
-        SCHED_STAT_CRANK(vcpu_remove);
-
-        /* Remove from runqueue */
-        lock = vcpu_schedule_lock_irq(vc);
-
-        runq_deassign(ops, vc);
-
-        vcpu_schedule_unlock_irq(lock, vc);
-
-        svc->sdom->nr_vcpus--;
-    }
-}
-
-static void
 csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
 {
     struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
@@ -1668,6 +1609,65 @@ csched2_dom_destroy(const struct scheduler *ops, struct domain *dom)
     csched2_free_domdata(ops, CSCHED2_DOM(dom));
 }
 
+static void
+csched2_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
+{
+    struct csched2_vcpu *svc = vc->sched_priv;
+    struct csched2_dom * const sdom = svc->sdom;
+    spinlock_t *lock;
+
+    printk("%s: Inserting %pv\n", __func__, vc);
+
+    BUG_ON(is_idle_vcpu(vc));
+
+    /* Add vcpu to runqueue of initial processor */
+    lock = vcpu_schedule_lock_irq(vc);
+
+    runq_assign(ops, vc);
+
+    vcpu_schedule_unlock_irq(lock, vc);
+
+    sdom->nr_vcpus++;
+
+    SCHED_STAT_CRANK(vcpu_insert);
+
+    CSCHED2_VCPU_CHECK(vc);
+}
+
+static void
+csched2_free_vdata(const struct scheduler *ops, void *priv)
+{
+    struct csched2_vcpu *svc = priv;
+
+    xfree(svc);
+}
+
+static void
+csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
+{
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    struct csched2_dom * const sdom = svc->sdom;
+
+    BUG_ON( sdom == NULL );
+    BUG_ON( !list_empty(&svc->runq_elem) );
+
+    if ( ! is_idle_vcpu(vc) )
+    {
+        spinlock_t *lock;
+
+        SCHED_STAT_CRANK(vcpu_remove);
+
+        /* Remove from runqueue */
+        lock = vcpu_schedule_lock_irq(vc);
+
+        runq_deassign(ops, vc);
+
+        vcpu_schedule_unlock_irq(lock, vc);
+
+        svc->sdom->nr_vcpus--;
+    }
+}
+
 /* How long should we let this vcpu run for? */
 static s_time_t
 csched2_runtime(const struct scheduler *ops, int cpu, struct csched2_vcpu *snext)
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 98524a6..bd3a2a0 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -582,6 +582,29 @@ replq_reinsert(const struct scheduler *ops, struct rt_vcpu *svc)
 }
 
 /*
+ * Pick a valid CPU for the vcpu vc
+ * Valid CPU of a vcpu is intersection of vcpu's affinity
+ * and available cpus
+ */
+static int
+rt_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
+{
+    cpumask_t cpus;
+    cpumask_t *online;
+    int cpu;
+
+    online = cpupool_domain_cpumask(vc->domain);
+    cpumask_and(&cpus, online, vc->cpu_hard_affinity);
+
+    cpu = cpumask_test_cpu(vc->processor, &cpus)
+            ? vc->processor
+            : cpumask_cycle(vc->processor, &cpus);
+    ASSERT( !cpumask_empty(&cpus) && cpumask_test_cpu(cpu, &cpus) );
+
+    return cpu;
+}
+
+/*
  * Init/Free related code
  */
 static int
@@ -894,29 +917,6 @@ rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
 }
 
 /*
- * Pick a valid CPU for the vcpu vc
- * Valid CPU of a vcpu is intersection of vcpu's affinity
- * and available cpus
- */
-static int
-rt_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
-{
-    cpumask_t cpus;
-    cpumask_t *online;
-    int cpu;
-
-    online = cpupool_domain_cpumask(vc->domain);
-    cpumask_and(&cpus, online, vc->cpu_hard_affinity);
-
-    cpu = cpumask_test_cpu(vc->processor, &cpus)
-            ? vc->processor
-            : cpumask_cycle(vc->processor, &cpus);
-    ASSERT( !cpumask_empty(&cpus) && cpumask_test_cpu(cpu, &cpus) );
-
-    return cpu;
-}
-
-/*
  * Burn budget in nanosecond granularity
  */
 static void
-- 
2.1.4

