xen-devel.lists.xenproject.org archive mirror
* [PATCH] xen: sched: rtds: refactor code
@ 2016-05-11 15:20 Tianyang Chen
  2016-05-13  3:34 ` Meng Xu
From: Tianyang Chen @ 2016-05-11 15:20 UTC (permalink / raw)
  To: xen-devel; +Cc: dario.faggioli, Tianyang Chen, george.dunlap, mengxu

No functional change:
 -Various coding style fixes
 -Added comments for UPDATE_LIMIT_SHIFT.

Use non-atomic bit-ops:
 -Vcpu flags are checked and cleared atomically. Performance can be
  improved with the corresponding non-atomic versions, since schedule.c
  already has spin_locks in place.
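
For illustration, a standalone sketch (made-up names; not the
hypervisor code) of why a plain read-modify-write is enough once a lock
serializes every writer of the flag word:

#include <pthread.h>

#define RTDS_depleted (1u << 0)

static pthread_spinlock_t sched_lock;  /* stands in for prv->lock */
static unsigned int vcpu_flags;        /* only written with lock held */

static void mark_depleted(void)
{
    pthread_spin_lock(&sched_lock);
    /* No concurrent writer can exist here, so a plain OR suffices;
     * an atomic set_bit() would only add a LOCK-prefixed instruction. */
    vcpu_flags |= RTDS_depleted;
    pthread_spin_unlock(&sched_lock);
}

int main(void)
{
    pthread_spin_init(&sched_lock, PTHREAD_PROCESS_PRIVATE);
    mark_depleted();
    return (vcpu_flags & RTDS_depleted) ? 0 : 1;
}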

Suggested-by: Dario Faggioli <dario.faggioli@citrix.com>
Signed-off-by: Tianyang Chen <tiche@seas.upenn.edu>
---
 xen/common/sched_rt.c |  122 ++++++++++++++++++++++++++-----------------------
 1 file changed, 64 insertions(+), 58 deletions(-)

diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 7f8f411..1a18f6d 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -80,7 +80,7 @@
  * in schedule.c
  *
  * The functions involes RunQ and needs to grab locks are:
- *    vcpu_insert, vcpu_remove, context_saved, __runq_insert
+ *    vcpu_insert, vcpu_remove, context_saved, runq_insert
  */
 
 
@@ -107,6 +107,12 @@
  */
 #define RTDS_MIN_BUDGET     (MICROSECS(10))
 
+/*
+ * UPDATE_LIMIT_SHIFT: a constant used in rt_update_deadline(). When finding
+ * the next deadline, performing addition could be faster if the difference
+ * between cur_deadline and now is small. If the difference is bigger than
+ * 1024 * period, use multiplication.
+ */
 #define UPDATE_LIMIT_SHIFT      10
 
 /*
@@ -158,25 +164,25 @@
 static void repl_timer_handler(void *data);
 
 /*
- * Systme-wide private data, include global RunQueue/DepletedQ
+ * System-wide private data, include global RunQueue/DepletedQ
  * Global lock is referenced by schedule_data.schedule_lock from all
  * physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
  */
 struct rt_private {
-    spinlock_t lock;            /* the global coarse grand lock */
-    struct list_head sdom;      /* list of availalbe domains, used for dump */
-    struct list_head runq;      /* ordered list of runnable vcpus */
-    struct list_head depletedq; /* unordered list of depleted vcpus */
-    struct list_head replq;     /* ordered list of vcpus that need replenishment */
-    cpumask_t tickled;          /* cpus been tickled */
-    struct timer *repl_timer;   /* replenishment timer */
+    spinlock_t lock;             /* the global coarse grand lock */
+    struct list_head sdom;       /* list of availalbe domains, used for dump */
+    struct list_head runq;       /* ordered list of runnable vcpus */
+    struct list_head depletedq;  /* unordered list of depleted vcpus */
+    struct list_head replq;      /* ordered list of vcpus that need replenishment */
+    cpumask_t tickled;           /* cpus been tickled */
+    struct timer *repl_timer;    /* replenishment timer */
 };
 
 /*
  * Virtual CPU
  */
 struct rt_vcpu {
-    struct list_head q_elem;    /* on the runq/depletedq list */
+    struct list_head q_elem;     /* on the runq/depletedq list */
     struct list_head replq_elem; /* on the replenishment events list */
 
     /* Up-pointers */
@@ -188,19 +194,19 @@ struct rt_vcpu {
     s_time_t budget;
 
     /* VCPU current infomation in nanosecond */
-    s_time_t cur_budget;        /* current budget */
-    s_time_t last_start;        /* last start time */
-    s_time_t cur_deadline;      /* current deadline for EDF */
+    s_time_t cur_budget;         /* current budget */
+    s_time_t last_start;         /* last start time */
+    s_time_t cur_deadline;       /* current deadline for EDF */
 
-    unsigned flags;             /* mark __RTDS_scheduled, etc.. */
+    unsigned flags;              /* mark __RTDS_scheduled, etc.. */
 };
 
 /*
  * Domain
  */
 struct rt_dom {
-    struct list_head sdom_elem; /* link list on rt_priv */
-    struct domain *dom;         /* pointer to upper domain */
+    struct list_head sdom_elem;  /* link list on rt_priv */
+    struct domain *dom;          /* pointer to upper domain */
 };
 
 /*
@@ -241,13 +247,13 @@ static inline struct list_head *rt_replq(const struct scheduler *ops)
  * and the replenishment events queue.
  */
 static int
-__vcpu_on_q(const struct rt_vcpu *svc)
+vcpu_on_q(const struct rt_vcpu *svc)
 {
    return !list_empty(&svc->q_elem);
 }
 
 static struct rt_vcpu *
-__q_elem(struct list_head *elem)
+q_elem(struct list_head *elem)
 {
     return list_entry(elem, struct rt_vcpu, q_elem);
 }
@@ -303,7 +309,7 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
             svc->cur_budget,
             svc->cur_deadline,
             svc->last_start,
-            __vcpu_on_q(svc),
+            vcpu_on_q(svc),
             vcpu_runnable(svc->vcpu),
             svc->flags,
             keyhandler_scratch);
@@ -339,28 +345,28 @@ rt_dump(const struct scheduler *ops)
     replq = rt_replq(ops);
 
     printk("Global RunQueue info:\n");
-    list_for_each( iter, runq )
+    list_for_each ( iter, runq )
     {
-        svc = __q_elem(iter);
+        svc = q_elem(iter);
         rt_dump_vcpu(ops, svc);
     }
 
     printk("Global DepletedQueue info:\n");
-    list_for_each( iter, depletedq )
+    list_for_each ( iter, depletedq )
     {
-        svc = __q_elem(iter);
+        svc = q_elem(iter);
         rt_dump_vcpu(ops, svc);
     }
 
     printk("Global Replenishment Events info:\n");
-    list_for_each( iter, replq )
+    list_for_each ( iter, replq )
     {
         svc = replq_elem(iter);
         rt_dump_vcpu(ops, svc);
     }
 
     printk("Domain info:\n");
-    list_for_each( iter, &prv->sdom )
+    list_for_each ( iter, &prv->sdom )
     {
         struct vcpu *v;
 
@@ -380,7 +386,7 @@ rt_dump(const struct scheduler *ops)
 
 /*
  * update deadline and budget when now >= cur_deadline
- * it need to be updated to the deadline of the current period
+ * it needs to be updated to the deadline of the current period
  */
 static void
 rt_update_deadline(s_time_t now, struct rt_vcpu *svc)
@@ -463,14 +469,14 @@ deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
     return !pos;
 }
 #define deadline_runq_insert(...) \
-  deadline_queue_insert(&__q_elem, ##__VA_ARGS__)
+  deadline_queue_insert(&q_elem, ##__VA_ARGS__)
 #define deadline_replq_insert(...) \
   deadline_queue_insert(&replq_elem, ##__VA_ARGS__)
 
 static inline void
-__q_remove(struct rt_vcpu *svc)
+q_remove(struct rt_vcpu *svc)
 {
-    ASSERT( __vcpu_on_q(svc) );
+    ASSERT( vcpu_on_q(svc) );
     list_del_init(&svc->q_elem);
 }
 
@@ -506,13 +512,13 @@ replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
  * Insert svc without budget in DepletedQ unsorted;
  */
 static void
-__runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
+runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
 {
     struct rt_private *prv = rt_priv(ops);
     struct list_head *runq = rt_runq(ops);
 
     ASSERT( spin_is_locked(&prv->lock) );
-    ASSERT( !__vcpu_on_q(svc) );
+    ASSERT( !vcpu_on_q(svc) );
     ASSERT( vcpu_on_replq(svc) );
 
     /* add svc to runq if svc still has budget */
@@ -840,12 +846,12 @@ rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
     if ( now >= svc->cur_deadline )
         rt_update_deadline(now, svc);
 
-    if ( !__vcpu_on_q(svc) && vcpu_runnable(vc) )
+    if ( !vcpu_on_q(svc) && vcpu_runnable(vc) )
     {
         replq_insert(ops, svc);
 
         if ( !vc->is_running )
-            __runq_insert(ops, svc);
+            runq_insert(ops, svc);
     }
     vcpu_schedule_unlock_irq(lock, vc);
 
@@ -867,8 +873,8 @@ rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
     BUG_ON( sdom == NULL );
 
     lock = vcpu_schedule_lock_irq(vc);
-    if ( __vcpu_on_q(svc) )
-        __q_remove(svc);
+    if ( vcpu_on_q(svc) )
+        q_remove(svc);
 
     if ( vcpu_on_replq(svc) )
         replq_remove(ops,svc);
@@ -930,7 +936,7 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
     if ( svc->cur_budget <= 0 )
     {
         svc->cur_budget = 0;
-        set_bit(__RTDS_depleted, &svc->flags);
+        __set_bit(__RTDS_depleted, &svc->flags);
     }
 
     /* TRACE */
@@ -955,7 +961,7 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
  * lock is grabbed before calling this function
  */
 static struct rt_vcpu *
-__runq_pick(const struct scheduler *ops, const cpumask_t *mask)
+runq_pick(const struct scheduler *ops, const cpumask_t *mask)
 {
     struct list_head *runq = rt_runq(ops);
     struct list_head *iter;
@@ -964,9 +970,9 @@ __runq_pick(const struct scheduler *ops, const cpumask_t *mask)
     cpumask_t cpu_common;
     cpumask_t *online;
 
-    list_for_each(iter, runq)
+    list_for_each ( iter, runq )
     {
-        iter_svc = __q_elem(iter);
+        iter_svc = q_elem(iter);
 
         /* mask cpu_hard_affinity & cpupool & mask */
         online = cpupool_domain_cpumask(iter_svc->vcpu->domain);
@@ -1028,7 +1034,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
     }
     else
     {
-        snext = __runq_pick(ops, cpumask_of(cpu));
+        snext = runq_pick(ops, cpumask_of(cpu));
         if ( snext == NULL )
             snext = rt_vcpu(idle_vcpu[cpu]);
 
@@ -1044,7 +1050,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
     if ( snext != scurr &&
          !is_idle_vcpu(current) &&
          vcpu_runnable(current) )
-        set_bit(__RTDS_delayed_runq_add, &scurr->flags);
+        __set_bit(__RTDS_delayed_runq_add, &scurr->flags);
 
     snext->last_start = now;
     ret.time =  -1; /* if an idle vcpu is picked */
@@ -1052,8 +1058,8 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
     {
         if ( snext != scurr )
         {
-            __q_remove(snext);
-            set_bit(__RTDS_scheduled, &snext->flags);
+            q_remove(snext);
+            __set_bit(__RTDS_scheduled, &snext->flags);
         }
         if ( snext->vcpu->processor != cpu )
         {
@@ -1081,13 +1087,13 @@ rt_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
 
     if ( curr_on_cpu(vc->processor) == vc )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
-    else if ( __vcpu_on_q(svc) )
+    else if ( vcpu_on_q(svc) )
     {
-        __q_remove(svc);
+        q_remove(svc);
         replq_remove(ops, svc);
     }
     else if ( svc->flags & RTDS_delayed_runq_add )
-        clear_bit(__RTDS_delayed_runq_add, &svc->flags);
+        __clear_bit(__RTDS_delayed_runq_add, &svc->flags);
 }
 
 /*
@@ -1201,7 +1207,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
     }
 
     /* on RunQ/DepletedQ, just update info is ok */
-    if ( unlikely(__vcpu_on_q(svc)) )
+    if ( unlikely(vcpu_on_q(svc)) )
     {
         SCHED_STAT_CRANK(vcpu_wake_onrunq);
         return;
@@ -1229,7 +1235,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
      */
     if ( unlikely(svc->flags & RTDS_scheduled) )
     {
-        set_bit(__RTDS_delayed_runq_add, &svc->flags);
+        __set_bit(__RTDS_delayed_runq_add, &svc->flags);
         /*
          * The vcpu is waking up already, and we didn't even had the time to
          * remove its next replenishment event from the replenishment queue
@@ -1245,7 +1251,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
     /* Replenishment event got cancelled when we blocked. Add it back. */
     replq_insert(ops, svc);
     /* insert svc to runq/depletedq because svc is not in queue now */
-    __runq_insert(ops, svc);
+    runq_insert(ops, svc);
 
     runq_tickle(ops, svc);
 }
@@ -1260,15 +1266,15 @@ rt_context_saved(const struct scheduler *ops, struct vcpu *vc)
     struct rt_vcpu *svc = rt_vcpu(vc);
     spinlock_t *lock = vcpu_schedule_lock_irq(vc);
 
-    clear_bit(__RTDS_scheduled, &svc->flags);
+    __clear_bit(__RTDS_scheduled, &svc->flags);
     /* not insert idle vcpu to runq */
     if ( is_idle_vcpu(vc) )
         goto out;
 
-    if ( test_and_clear_bit(__RTDS_delayed_runq_add, &svc->flags) &&
+    if ( __test_and_clear_bit(__RTDS_delayed_runq_add, &svc->flags) &&
          likely(vcpu_runnable(vc)) )
     {
-        __runq_insert(ops, svc);
+        runq_insert(ops, svc);
         runq_tickle(ops, svc);
     }
     else
@@ -1414,10 +1420,10 @@ static void repl_timer_handler(void *data){
         rt_update_deadline(now, svc);
         list_add(&svc->replq_elem, &tmp_replq);
 
-        if ( __vcpu_on_q(svc) )
+        if ( vcpu_on_q(svc) )
         {
-            __q_remove(svc);
-            __runq_insert(ops, svc);
+            q_remove(svc);
+            runq_insert(ops, svc);
         }
     }
 
@@ -1435,13 +1441,13 @@ static void repl_timer_handler(void *data){
         if ( curr_on_cpu(svc->vcpu->processor) == svc->vcpu &&
              !list_empty(runq) )
         {
-            struct rt_vcpu *next_on_runq = __q_elem(runq->next);
+            struct rt_vcpu *next_on_runq = q_elem(runq->next);
 
             if ( svc->cur_deadline > next_on_runq->cur_deadline )
                 runq_tickle(ops, next_on_runq);
         }
-        else if ( __vcpu_on_q(svc) &&
-                  test_and_clear_bit(__RTDS_depleted, &svc->flags) )
+        else if ( vcpu_on_q(svc) &&
+                  __test_and_clear_bit(__RTDS_depleted, &svc->flags) )
             runq_tickle(ops, svc);
 
         list_del(&svc->replq_elem);
-- 
1.7.9.5
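
As an aside, the fast/slow split that the new UPDATE_LIMIT_SHIFT
comment describes boils down to the following standalone sketch (it
mirrors the logic of rt_update_deadline(), but it is not the exact
hypervisor code):

#include <stdio.h>

typedef long long s_time_t;            /* mirrors Xen's s_time_t (ns) */

#define UPDATE_LIMIT_SHIFT 10

static s_time_t next_deadline(s_time_t cur_deadline, s_time_t period,
                              s_time_t now)
{
    if ( cur_deadline + (period << UPDATE_LIMIT_SHIFT) > now )
    {
        /* Fast path: now lies within 1024 periods of the old deadline,
         * so a few additions beat a 64-bit division. */
        do
            cur_deadline += period;
        while ( cur_deadline <= now );
    }
    else
    {
        /* Slow path: skip all the missed periods in one step. */
        s_time_t missed = (now - cur_deadline) / period + 1;
        cur_deadline += missed * period;
    }
    return cur_deadline;
}

int main(void)
{
    /* deadline was 10ms, period 10ms, now 25ms -> next deadline 30ms */
    printf("%lld\n", next_deadline(10000000LL, 10000000LL, 25000000LL));
    return 0;
}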



* Re: [PATCH] xen: sched: rtds: refactor code
  2016-05-11 15:20 [PATCH] xen: sched: rtds: refactor code Tianyang Chen
@ 2016-05-13  3:34 ` Meng Xu
  2016-05-13  7:41   ` Dario Faggioli
From: Meng Xu @ 2016-05-13  3:34 UTC (permalink / raw)
  To: Tianyang Chen; +Cc: xen-devel, Dario Faggioli, George Dunlap

Hi Tianyang

On Wed, May 11, 2016 at 11:20 AM, Tianyang Chen <tiche@seas.upenn.edu> wrote:
> No functional change:
>  -Various coding style fixes
>  -Added comments for UPDATE_LIMIT_SHIFT.
>
> Use non-atomic bit-ops:
>  -Vcpu flags are checked and cleared atomically. Performance can be
>   improved with the corresponding non-atomic versions, since schedule.c
>   already has spin_locks in place.
>
> Suggested-by: Dario Faggioli <dario.faggioli@citrix.com>

It would be better to add a link to the thread that contains the suggestion.

> @@ -930,7 +936,7 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
>      if ( svc->cur_budget <= 0 )
>      {
>          svc->cur_budget = 0;
> -        set_bit(__RTDS_depleted, &svc->flags);
> +        __set_bit(__RTDS_depleted, &svc->flags);
>      }
>
>      /* TRACE */
> @@ -955,7 +961,7 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
>   * lock is grabbed before calling this function

The comment says "lock is grabbed before calling this function".
IIRC, we use __ to indicate that the lock is grabbed before calling this
function.
This change then violates that convention.

>   */
>  static struct rt_vcpu *
> -__runq_pick(const struct scheduler *ops, const cpumask_t *mask)
> +runq_pick(const struct scheduler *ops, const cpumask_t *mask)
>  {
>      struct list_head *runq = rt_runq(ops);
>      struct list_head *iter;
> @@ -964,9 +970,9 @@ __runq_pick(const struct scheduler *ops, const cpumask_t *mask)
>      cpumask_t cpu_common;
>      cpumask_t *online;
>
> -    list_for_each(iter, runq)
> +    list_for_each ( iter, runq )
>      {
> -        iter_svc = __q_elem(iter);
> +        iter_svc = q_elem(iter);
>
>          /* mask cpu_hard_affinity & cpupool & mask */
>          online = cpupool_domain_cpumask(iter_svc->vcpu->domain);
> @@ -1028,7 +1034,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
>      }
>      else
>      {
> -        snext = __runq_pick(ops, cpumask_of(cpu));
> +        snext = runq_pick(ops, cpumask_of(cpu));
>          if ( snext == NULL )
>              snext = rt_vcpu(idle_vcpu[cpu]);
>



Meng

-----------
Meng Xu
PhD Student in Computer and Information Science
University of Pennsylvania
http://www.cis.upenn.edu/~mengxu/


* Re: [PATCH] xen: sched: rtds: refactor code
  2016-05-13  3:34 ` Meng Xu
@ 2016-05-13  7:41   ` Dario Faggioli
From: Dario Faggioli @ 2016-05-13  7:41 UTC (permalink / raw)
  To: Meng Xu, Tianyang Chen; +Cc: xen-devel, George Dunlap



On Thu, 2016-05-12 at 23:34 -0400, Meng Xu wrote:
> On Wed, May 11, 2016 at 11:20 AM, Tianyang Chen <tiche@seas.upenn.edu> wrote:
> > 
> > No functional change:
> >  -Various coding style fixes
> >  -Added comments for UPDATE_LIMIT_SHIFT.
> > 
Hey, Tianyang, thanks for this.

> > Use non-atomic bit-ops:
> >  -Vcpu flags are checked and cleared atomically. Performance can be
> >   improved with the corresponding non-atomic versions, since schedule.c
> >   already has spin_locks in place.
> > 
And this too. However, I think the patch should be split into at least
two: one doing the style/comment cleanups, the other doing the
atomic/non-atomic switch.

> > Suggested-by: Dario Faggioli <dario.faggioli@citrix.com>
> It's better to add the link to the thread that has the suggestion.
> 
Yes, Suggested-by tags are not especially useful, IMHO. I'm fine with
you using one but, as Meng says, a link would be more helpful. If you
send a series, which will have a cover letter, put the link(s) in the
cover letter rather than in the changelog: that's important information
when reviewing, much less so when looking at git-log in 5 years'
time. :-)

> > @@ -930,7 +936,7 @@ burn_budget(const struct scheduler *ops, struct
> > rt_vcpu *svc, s_time_t now)
> >      if ( svc->cur_budget <= 0 )
> >      {
> >          svc->cur_budget = 0;
> > -        set_bit(__RTDS_depleted, &svc->flags);
> > +        __set_bit(__RTDS_depleted, &svc->flags);
> >      }
> > 
> >      /* TRACE */
> > @@ -955,7 +961,7 @@ burn_budget(const struct scheduler *ops, struct
> > rt_vcpu *svc, s_time_t now)
> >   * lock is grabbed before calling this function
> The comment says "lock is grabbed before calling this function".
> IIRC, we use __ to indicate that the lock is grabbed before calling
> this function.
> This change then violates that convention.
> 
Yeah, but it was a bad convention, IMO, at least for sched_*.c files. 

In fact, it is debatable whether one or two underscores is what should
really be used.

Also, that's not the only convention that led to the introduction of
this kind of prefix (for example, I've often seen '__' used to indicate
some sort of 'raw' variant of an operation... there are examples of
this in Xen too)... which means one never knows which convention for
'__' applies at any given point! :-O

But even if we leave this aside and consider the '__'==>locked rule,
well, pretty much all functions in sched_rt.c are called with the
proper locks held already. Basically, since on many occasions the lock
has been taken in schedule.c before calling the hook, all the functions
ever called by a hook -- and maybe even the hook itself -- should have
the '__' prefix, which defeats the purpose of the convention itself!

So, on this, I agree with Tianyang, and I think removing the '__' is a
good thing for this source file.

If there are places where we want to particularly stress the fact that
locks should have been taken already, then either add a comment or a
'_locked' suffix to the function name. But that should be the exception
rather than the rule and, off the top of my head, I don't recall any
cases in sched_rt.c where that would be tremendously helpful...
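
To make that concrete, a tiny hypothetical sketch of the '_locked'
alternative, in the style of the code above (runq_insert_locked() does
not actually exist in sched_rt.c; ASSERT() and spin_is_locked() are
used the same way the patch already uses them in runq_insert()):

/* Caller must hold prv->lock. */
static void
runq_insert_locked(const struct scheduler *ops, struct rt_vcpu *svc)
{
    struct rt_private *prv = rt_priv(ops);

    ASSERT( spin_is_locked(&prv->lock) );  /* enforce, not just document */
    /* ... the actual insertion, as in runq_insert() above ... */
}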

Thanks and Regards,
Dario
-- 
<<This happens because I choose it to happen!>> (Raistlin Majere)
-----------------------------------------------------------------
Dario Faggioli, Ph.D, http://about.me/dario.faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)



