xen-devel.lists.xenproject.org archive mirror
From: Dario Faggioli <dario.faggioli@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Anshul Makkar <anshul.makkar@citrix.com>,
	George Dunlap <george.dunlap@citrix.com>,
	David Vrabel <david.vrabel@citrix.com>
Subject: [PATCH 12/19] xen: credit2: use non-atomic cpumask and bit operations
Date: Sat, 18 Jun 2016 01:12:44 +0200	[thread overview]
Message-ID: <146620516399.29766.4671178664524331349.stgit@Solace.fritz.box> (raw)
In-Reply-To: <146620492155.29766.10321123657058307698.stgit@Solace.fritz.box>

as all accesses to both the masks and the flags are already
serialized by the runqueue locks.
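
For reference, the pattern being relied upon looks like the sketch below
(illustrative only, not part of the patch: the structure, lock and flag
names are made up; only __set_bit()/__clear_bit() are the real non-atomic
primitives):

    /*
     * Illustrative sketch: every access to 'flags' happens with the same
     * spinlock held, so the plain (non-LOCKed) read-modify-write done by
     * __set_bit()/__clear_bit() cannot race with anything, and the atomic
     * set_bit()/clear_bit() variants would only add overhead.
     */
    struct toy_runqueue {
        spinlock_t lock;
        unsigned long flags;
    };

    #define TOY_FLAG_BUSY 0

    static void toy_mark_busy(struct toy_runqueue *rqd)
    {
        spin_lock(&rqd->lock);
        __set_bit(TOY_FLAG_BUSY, &rqd->flags);
        spin_unlock(&rqd->lock);
    }

    static void toy_mark_idle(struct toy_runqueue *rqd)
    {
        spin_lock(&rqd->lock);
        __clear_bit(TOY_FLAG_BUSY, &rqd->flags);
        spin_unlock(&rqd->lock);
    }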

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
---
Cc: George Dunlap <george.dunlap@citrix.com>
Cc: Anshul Makkar <anshul.makkar@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
---
 xen/common/sched_credit2.c |   48 ++++++++++++++++++++++----------------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 230a512..2ca63ae 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -909,7 +909,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
                   sizeof(d),
                   (unsigned char *)&d);
     }
-    cpumask_set_cpu(ipid, &rqd->tickled);
+    __cpumask_set_cpu(ipid, &rqd->tickled);
     cpu_raise_softirq(ipid, SCHEDULE_SOFTIRQ);
 }
 
@@ -1277,7 +1277,7 @@ csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
         __runq_remove(svc);
     }
     else if ( svc->flags & CSFLAG_delayed_runq_add )
-        clear_bit(__CSFLAG_delayed_runq_add, &svc->flags);
+        __clear_bit(__CSFLAG_delayed_runq_add, &svc->flags);
 }
 
 static void
@@ -1314,7 +1314,7 @@ csched2_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
      * after the context has been saved. */
     if ( unlikely(svc->flags & CSFLAG_scheduled) )
     {
-        set_bit(__CSFLAG_delayed_runq_add, &svc->flags);
+        __set_bit(__CSFLAG_delayed_runq_add, &svc->flags);
         goto out;
     }
 
@@ -1347,7 +1347,7 @@ csched2_context_saved(const struct scheduler *ops, struct vcpu *vc)
     BUG_ON( !is_idle_vcpu(vc) && svc->rqd != RQD(ops, vc->processor));
 
     /* This vcpu is now eligible to be put on the runqueue again */
-    clear_bit(__CSFLAG_scheduled, &svc->flags);
+    __clear_bit(__CSFLAG_scheduled, &svc->flags);
 
     /* If someone wants it on the runqueue, put it there. */
     /*
@@ -1357,7 +1357,7 @@ csched2_context_saved(const struct scheduler *ops, struct vcpu *vc)
      * it seems a bit pointless; especially as we have plenty of
      * bits free.
      */
-    if ( test_and_clear_bit(__CSFLAG_delayed_runq_add, &svc->flags)
+    if ( __test_and_clear_bit(__CSFLAG_delayed_runq_add, &svc->flags)
          && likely(vcpu_runnable(vc)) )
     {
         BUG_ON(__vcpu_on_runq(svc));
@@ -1399,10 +1399,10 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
 
     if ( !spin_trylock(&prv->lock) )
     {
-        if ( test_and_clear_bit(__CSFLAG_runq_migrate_request, &svc->flags) )
+        if ( __test_and_clear_bit(__CSFLAG_runq_migrate_request, &svc->flags) )
         {
             d2printk("%pv -\n", svc->vcpu);
-            clear_bit(__CSFLAG_runq_migrate_request, &svc->flags);
+            __clear_bit(__CSFLAG_runq_migrate_request, &svc->flags);
         }
 
         return get_fallback_cpu(svc);
@@ -1410,7 +1410,7 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
 
     /* First check to see if we're here because someone else suggested a place
      * for us to move. */
-    if ( test_and_clear_bit(__CSFLAG_runq_migrate_request, &svc->flags) )
+    if ( __test_and_clear_bit(__CSFLAG_runq_migrate_request, &svc->flags) )
     {
         if ( unlikely(svc->migrate_rqd->id < 0) )
         {
@@ -1545,8 +1545,8 @@ static void migrate(const struct scheduler *ops,
         d2printk("%pv %d-%d a\n", svc->vcpu, svc->rqd->id, trqd->id);
         /* It's running; mark it to migrate. */
         svc->migrate_rqd = trqd;
-        set_bit(_VPF_migrating, &svc->vcpu->pause_flags);
-        set_bit(__CSFLAG_runq_migrate_request, &svc->flags);
+        __set_bit(_VPF_migrating, &svc->vcpu->pause_flags);
+        __set_bit(__CSFLAG_runq_migrate_request, &svc->flags);
         SCHED_STAT_CRANK(migrate_requested);
     }
     else
@@ -2079,7 +2079,7 @@ csched2_schedule(
 
     /* Clear "tickled" bit now that we've been scheduled */
     if ( cpumask_test_cpu(cpu, &rqd->tickled) )
-        cpumask_clear_cpu(cpu, &rqd->tickled);
+        __cpumask_clear_cpu(cpu, &rqd->tickled);
 
     /* Update credits */
     burn_credits(rqd, scurr, now);
@@ -2115,7 +2115,7 @@ csched2_schedule(
     if ( snext != scurr
          && !is_idle_vcpu(scurr->vcpu)
          && vcpu_runnable(current) )
-        set_bit(__CSFLAG_delayed_runq_add, &scurr->flags);
+        __set_bit(__CSFLAG_delayed_runq_add, &scurr->flags);
 
     ret.migrated = 0;
 
@@ -2134,7 +2134,7 @@ csched2_schedule(
                        cpu, snext->vcpu, snext->vcpu->processor, scurr->vcpu);
                 BUG();
             }
-            set_bit(__CSFLAG_scheduled, &snext->flags);
+            __set_bit(__CSFLAG_scheduled, &snext->flags);
         }
 
         /* Check for the reset condition */
@@ -2146,7 +2146,7 @@ csched2_schedule(
 
         /* Clear the idle mask if necessary */
         if ( cpumask_test_cpu(cpu, &rqd->idle) )
-            cpumask_clear_cpu(cpu, &rqd->idle);
+            __cpumask_clear_cpu(cpu, &rqd->idle);
 
         snext->start_time = now;
 
@@ -2168,10 +2168,10 @@ csched2_schedule(
         if ( tasklet_work_scheduled )
         {
             if ( cpumask_test_cpu(cpu, &rqd->idle) )
-                cpumask_clear_cpu(cpu, &rqd->idle);
+                __cpumask_clear_cpu(cpu, &rqd->idle);
         }
         else if ( !cpumask_test_cpu(cpu, &rqd->idle) )
-            cpumask_set_cpu(cpu, &rqd->idle);
+            __cpumask_set_cpu(cpu, &rqd->idle);
         /* Make sure avgload gets updated periodically even
          * if there's no activity */
         update_load(ops, rqd, NULL, 0, now);
@@ -2347,7 +2347,7 @@ static void activate_runqueue(struct csched2_private *prv, int rqi)
     INIT_LIST_HEAD(&rqd->runq);
     spin_lock_init(&rqd->lock);
 
-    cpumask_set_cpu(rqi, &prv->active_queues);
+    __cpumask_set_cpu(rqi, &prv->active_queues);
 }
 
 static void deactivate_runqueue(struct csched2_private *prv, int rqi)
@@ -2360,7 +2360,7 @@ static void deactivate_runqueue(struct csched2_private *prv, int rqi)
     
     rqd->id = -1;
 
-    cpumask_clear_cpu(rqi, &prv->active_queues);
+    __cpumask_clear_cpu(rqi, &prv->active_queues);
 }
 
 static inline bool_t same_node(unsigned int cpua, unsigned int cpub)
@@ -2449,9 +2449,9 @@ init_pdata(struct csched2_private *prv, unsigned int cpu)
     /* Set the runqueue map */
     prv->runq_map[cpu] = rqi;
     
-    cpumask_set_cpu(cpu, &rqd->idle);
-    cpumask_set_cpu(cpu, &rqd->active);
-    cpumask_set_cpu(cpu, &prv->initialized);
+    __cpumask_set_cpu(cpu, &rqd->idle);
+    __cpumask_set_cpu(cpu, &rqd->active);
+    __cpumask_set_cpu(cpu, &prv->initialized);
 
     return rqi;
 }
@@ -2556,8 +2556,8 @@ csched2_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 
     printk("Removing cpu %d from runqueue %d\n", cpu, rqi);
 
-    cpumask_clear_cpu(cpu, &rqd->idle);
-    cpumask_clear_cpu(cpu, &rqd->active);
+    __cpumask_clear_cpu(cpu, &rqd->idle);
+    __cpumask_clear_cpu(cpu, &rqd->active);
 
     if ( cpumask_empty(&rqd->active) )
     {
@@ -2567,7 +2567,7 @@ csched2_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 
     spin_unlock(&rqd->lock);
 
-    cpumask_clear_cpu(cpu, &prv->initialized);
+    __cpumask_clear_cpu(cpu, &prv->initialized);
 
     spin_unlock_irqrestore(&prv->lock, flags);
 


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel


Thread overview: 64+ messages
2016-06-17 23:11 [PATCH 00/19] xen: sched: assorted fixes and improvements to Credit2 Dario Faggioli
2016-06-17 23:11 ` [PATCH 01/19] xen: sched: leave CPUs doing tasklet work alone Dario Faggioli
2016-06-20  7:48   ` Jan Beulich
2016-07-07 10:11     ` Dario Faggioli
2016-06-21 16:17   ` anshul makkar
2016-07-06 15:41   ` George Dunlap
2016-07-07 10:25     ` Dario Faggioli
2016-06-17 23:11 ` [PATCH 02/19] xen: sched: make the 'tickled' perf counter clearer Dario Faggioli
2016-06-18  0:36   ` Meng Xu
2016-07-06 15:52   ` George Dunlap
2016-06-17 23:11 ` [PATCH 03/19] xen: credit2: insert and tickle don't need a cpu parameter Dario Faggioli
2016-06-21 16:41   ` anshul makkar
2016-07-06 15:59   ` George Dunlap
2016-06-17 23:11 ` [PATCH 04/19] xen: credit2: kill useless helper function choose_cpu Dario Faggioli
2016-07-06 16:02   ` George Dunlap
2016-07-07 10:26     ` Dario Faggioli
2016-06-17 23:11 ` [PATCH 05/19] xen: credit2: do not warn if calling burn_credits more than once Dario Faggioli
2016-07-06 16:05   ` George Dunlap
2016-06-17 23:12 ` [PATCH 06/19] xen: credit2: read NOW() with the proper runq lock held Dario Faggioli
2016-06-20  7:56   ` Jan Beulich
2016-07-06 16:10     ` George Dunlap
2016-07-07 10:28       ` Dario Faggioli
2016-06-17 23:12 ` [PATCH 07/19] xen: credit2: prevent load balancing to go mad if time goes backwards Dario Faggioli
2016-06-20  8:02   ` Jan Beulich
2016-07-06 16:21     ` George Dunlap
2016-07-07  7:29       ` Jan Beulich
2016-07-07  9:09         ` George Dunlap
2016-07-07  9:18           ` Jan Beulich
2016-07-07 10:53             ` Dario Faggioli
2016-06-17 23:12 ` [PATCH 08/19] xen: credit2: when tickling, check idle cpus first Dario Faggioli
2016-07-06 16:36   ` George Dunlap
2016-06-17 23:12 ` [PATCH 09/19] xen: credit2: avoid calling __update_svc_load() multiple times on the same vcpu Dario Faggioli
2016-07-06 16:40   ` George Dunlap
2016-06-17 23:12 ` [PATCH 10/19] xen: credit2: rework load tracking logic Dario Faggioli
2016-07-06 17:33   ` George Dunlap
2016-06-17 23:12 ` [PATCH 11/19] tools: tracing: adapt Credit2 load tracking events to new format Dario Faggioli
2016-06-21  9:27   ` Wei Liu
2016-06-17 23:12 ` Dario Faggioli [this message]
2016-07-07  9:45   ` [PATCH 12/19] xen: credit2: use non-atomic cpumask and bit operations George Dunlap
2016-06-17 23:12 ` [PATCH 13/19] xen: credit2: make the code less experimental Dario Faggioli
2016-06-20  8:13   ` Jan Beulich
2016-07-07 10:59     ` Dario Faggioli
2016-07-07 15:17   ` George Dunlap
2016-07-07 16:43     ` Dario Faggioli
2016-06-17 23:12 ` [PATCH 14/19] xen: credit2: add yet some more tracing Dario Faggioli
2016-06-20  8:15   ` Jan Beulich
2016-07-07 15:34     ` George Dunlap
2016-07-07 15:34   ` George Dunlap
2016-06-17 23:13 ` [PATCH 15/19] xen: credit2: only marshall trace point arguments if tracing enabled Dario Faggioli
2016-07-07 15:37   ` George Dunlap
2016-06-17 23:13 ` [PATCH 16/19] tools: tracing: deal with new Credit2 events Dario Faggioli
2016-07-07 15:39   ` George Dunlap
2016-06-17 23:13 ` [PATCH 17/19] xen: credit2: the private scheduler lock can be an rwlock Dario Faggioli
2016-07-07 16:00   ` George Dunlap
2016-06-17 23:13 ` [PATCH 18/19] xen: credit2: implement SMT support independent runq arrangement Dario Faggioli
2016-06-20  8:26   ` Jan Beulich
2016-06-20 10:38     ` Dario Faggioli
2016-06-27 15:20   ` anshul makkar
2016-07-12 13:40   ` George Dunlap
2016-06-17 23:13 ` [PATCH 19/19] xen: credit2: use cpumask_first instead of cpumask_any when choosing cpu Dario Faggioli
2016-06-20  8:30   ` Jan Beulich
2016-06-20 11:28     ` Dario Faggioli
2016-06-21 10:42   ` David Vrabel
2016-07-07 16:55     ` Dario Faggioli
