From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Julien Grall <julien@xen.org>, Wei Liu <wl@xen.org>,
	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
	George Dunlap <george.dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	Dario Faggioli <dfaggioli@suse.com>,
	Josh Whitehead <josh.whitehead@dornerworks.com>,
	Meng Xu <mengxu@cis.upenn.edu>, Jan Beulich <jbeulich@suse.com>,
	Stewart Hildebrand <stewart.hildebrand@dornerworks.com>
Subject: [Xen-devel] [PATCH v2 7/9] xen/sched: switch scheduling to bool where appropriate
Date: Wed,  8 Jan 2020 16:23:26 +0100
Message-ID: <20200108152328.27194-8-jgross@suse.com>
In-Reply-To: <20200108152328.27194-1-jgross@suse.com>

Scheduling code has several places using int or bool_t where plain bool
is appropriate. Switch those to bool.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- rename bool "pos" to "first" (Dario Faggioli)
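
Background for the switch (an illustrative sketch, not part of the patch
proper): Xen's legacy bool_t was historically a plain char typedef, so
returning a flag test such as "mask & bit" can silently truncate a high
bit to zero, whereas C99 bool from <stdbool.h> normalises any nonzero
value to true. A minimal standalone comparison, with hypothetical helper
names:

    #include <stdbool.h>
    #include <stdio.h>

    typedef char bool_t;                /* legacy pre-C99 style typedef */

    static bool_t legacy_flag(int mask, int bit)
    {
        return mask & bit;              /* 0x100 truncates to 0 in char */
    }

    static bool c99_flag(int mask, int bit)
    {
        return mask & bit;              /* _Bool: any nonzero becomes 1 */
    }

    int main(void)
    {
        printf("bool_t: %d\n", legacy_flag(0x100, 0x100)); /* prints 0 */
        printf("bool:   %d\n", c99_flag(0x100, 0x100));    /* prints 1 */
        return 0;
    }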
---
 xen/common/sched/arinc653.c |  8 ++++----
 xen/common/sched/core.c     | 14 +++++++-------
 xen/common/sched/cpupool.c  | 10 +++++-----
 xen/common/sched/credit.c   | 12 ++++++------
 xen/common/sched/private.h  |  2 +-
 xen/common/sched/rt.c       | 18 +++++++++---------
 xen/include/xen/sched.h     |  6 +++---
 7 files changed, 35 insertions(+), 35 deletions(-)

diff --git a/xen/common/sched/arinc653.c b/xen/common/sched/arinc653.c
index 8895d92b5e..bce8021e3f 100644
--- a/xen/common/sched/arinc653.c
+++ b/xen/common/sched/arinc653.c
@@ -75,7 +75,7 @@ typedef struct arinc653_unit_s
      * arinc653_unit_t pointer. */
     struct sched_unit * unit;
     /* awake holds whether the UNIT has been woken with vcpu_wake() */
-    bool_t              awake;
+    bool                awake;
     /* list holds the linked list information for the list this UNIT
      * is stored in */
     struct list_head    list;
@@ -427,7 +427,7 @@ a653sched_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
      * will mark the UNIT awake.
      */
     svc->unit = unit;
-    svc->awake = 0;
+    svc->awake = false;
     if ( !is_idle_unit(unit) )
         list_add(&svc->list, &SCHED_PRIV(ops)->unit_list);
     update_schedule_units(ops);
@@ -473,7 +473,7 @@ static void
 a653sched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     if ( AUNIT(unit) != NULL )
-        AUNIT(unit)->awake = 0;
+        AUNIT(unit)->awake = false;
 
     /*
      * If the UNIT being put to sleep is the same one that is currently
@@ -493,7 +493,7 @@ static void
 a653sched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     if ( AUNIT(unit) != NULL )
-        AUNIT(unit)->awake = 1;
+        AUNIT(unit)->awake = true;
 
     cpu_raise_softirq(sched_unit_master(unit), SCHEDULE_SOFTIRQ);
 }
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index 4153d110be..896f82f4d2 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -53,7 +53,7 @@ string_param("sched", opt_sched);
  * scheduler will give preferrence to partially idle package compared to
  * the full idle package, when picking pCPU to schedule vCPU.
  */
-bool_t sched_smt_power_savings = 0;
+bool sched_smt_power_savings;
 boolean_param("sched_smt_power_savings", sched_smt_power_savings);
 
 /* Default scheduling rate limit: 1ms
@@ -574,7 +574,7 @@ int sched_init_vcpu(struct vcpu *v)
     {
         get_sched_res(v->processor)->curr = unit;
         get_sched_res(v->processor)->sched_unit_idle = unit;
-        v->is_running = 1;
+        v->is_running = true;
         unit->is_running = true;
         unit->state_entry_time = NOW();
     }
@@ -983,7 +983,7 @@ static void sched_unit_migrate_finish(struct sched_unit *unit)
     unsigned long flags;
     unsigned int old_cpu, new_cpu;
     spinlock_t *old_lock, *new_lock;
-    bool_t pick_called = 0;
+    bool pick_called = false;
     struct vcpu *v;
 
     /*
@@ -1029,7 +1029,7 @@ static void sched_unit_migrate_finish(struct sched_unit *unit)
             if ( (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, unit->domain->cpupool->cpu_valid) )
                 break;
-            pick_called = 1;
+            pick_called = true;
         }
         else
         {
@@ -1037,7 +1037,7 @@ static void sched_unit_migrate_finish(struct sched_unit *unit)
              * We do not hold the scheduler lock appropriate for this vCPU.
              * Thus we cannot select a new CPU on this iteration. Try again.
              */
-            pick_called = 0;
+            pick_called = false;
         }
 
         sched_spin_unlock_double(old_lock, new_lock, flags);
@@ -2148,7 +2148,7 @@ static void sched_switch_units(struct sched_resource *sr,
             vcpu_runstate_change(vnext, vnext->new_state, now);
         }
 
-        vnext->is_running = 1;
+        vnext->is_running = true;
 
         if ( is_idle_vcpu(vnext) )
             vnext->sched_unit = next;
@@ -2219,7 +2219,7 @@ static void vcpu_context_saved(struct vcpu *vprev, struct vcpu *vnext)
     smp_wmb();
 
     if ( vprev != vnext )
-        vprev->is_running = 0;
+        vprev->is_running = false;
 }
 
 static void unit_context_saved(struct sched_resource *sr)
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index 7b31ab0d61..c1396cfff4 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -154,7 +154,7 @@ static struct cpupool *alloc_cpupool_struct(void)
  * the searched id is returned
  * returns NULL if not found.
  */
-static struct cpupool *__cpupool_find_by_id(int id, int exact)
+static struct cpupool *__cpupool_find_by_id(int id, bool exact)
 {
     struct cpupool **q;
 
@@ -169,10 +169,10 @@ static struct cpupool *__cpupool_find_by_id(int id, int exact)
 
 static struct cpupool *cpupool_find_by_id(int poolid)
 {
-    return __cpupool_find_by_id(poolid, 1);
+    return __cpupool_find_by_id(poolid, true);
 }
 
-static struct cpupool *__cpupool_get_by_id(int poolid, int exact)
+static struct cpupool *__cpupool_get_by_id(int poolid, bool exact)
 {
     struct cpupool *c;
     spin_lock(&cpupool_lock);
@@ -185,12 +185,12 @@ static struct cpupool *__cpupool_get_by_id(int poolid, int exact)
 
 struct cpupool *cpupool_get_by_id(int poolid)
 {
-    return __cpupool_get_by_id(poolid, 1);
+    return __cpupool_get_by_id(poolid, true);
 }
 
 static struct cpupool *cpupool_get_next_by_id(int poolid)
 {
-    return __cpupool_get_by_id(poolid, 0);
+    return __cpupool_get_by_id(poolid, false);
 }
 
 void cpupool_put(struct cpupool *pool)
diff --git a/xen/common/sched/credit.c b/xen/common/sched/credit.c
index 6b04f8f71c..a75efbd43d 100644
--- a/xen/common/sched/credit.c
+++ b/xen/common/sched/credit.c
@@ -245,7 +245,7 @@ __runq_elem(struct list_head *elem)
 }
 
 /* Is the first element of cpu's runq (if any) cpu's idle unit? */
-static inline bool_t is_runq_idle(unsigned int cpu)
+static inline bool is_runq_idle(unsigned int cpu)
 {
     /*
      * We're peeking at cpu's runq, we must hold the proper lock.
@@ -344,7 +344,7 @@ static void burn_credits(struct csched_unit *svc, s_time_t now)
     svc->start_time += (credits * MILLISECS(1)) / CSCHED_CREDITS_PER_MSEC;
 }
 
-static bool_t __read_mostly opt_tickle_one_idle = 1;
+static bool __read_mostly opt_tickle_one_idle = true;
 boolean_param("tickle_one_idle_cpu", opt_tickle_one_idle);
 
 DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
@@ -719,7 +719,7 @@ __csched_unit_is_migrateable(const struct csched_private *prv,
 
 static int
 _csched_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit,
-                 bool_t commit)
+                 bool commit)
 {
     int cpu = sched_unit_master(unit);
     /* We must always use cpu's scratch space */
@@ -871,7 +871,7 @@ csched_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
      * get boosted, which we don't deserve as we are "only" migrating.
      */
     set_bit(CSCHED_FLAG_UNIT_MIGRATING, &svc->flags);
-    return get_sched_res(_csched_cpu_pick(ops, unit, 1));
+    return get_sched_res(_csched_cpu_pick(ops, unit, true));
 }
 
 static inline void
@@ -975,7 +975,7 @@ csched_unit_acct(struct csched_private *prv, unsigned int cpu)
          * migrating it to run elsewhere (see multi-core and multi-thread
          * support in csched_res_pick()).
          */
-        new_cpu = _csched_cpu_pick(ops, currunit, 0);
+        new_cpu = _csched_cpu_pick(ops, currunit, false);
 
         unit_schedule_unlock_irqrestore(lock, flags, currunit);
 
@@ -1108,7 +1108,7 @@ static void
 csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched_unit * const svc = CSCHED_UNIT(unit);
-    bool_t migrating;
+    bool migrating;
 
     BUG_ON( is_idle_unit(unit) );
 
diff --git a/xen/common/sched/private.h b/xen/common/sched/private.h
index edce354dc7..9d0db75cbb 100644
--- a/xen/common/sched/private.h
+++ b/xen/common/sched/private.h
@@ -589,7 +589,7 @@ unsigned int cpupool_get_granularity(const struct cpupool *c);
  * * The hard affinity is not a subset of soft affinity
  * * There is an overlap between the soft and hard affinity masks
  */
-static inline int has_soft_affinity(const struct sched_unit *unit)
+static inline bool has_soft_affinity(const struct sched_unit *unit)
 {
     return unit->soft_aff_effective &&
            !cpumask_subset(cpupool_domain_master_cpumask(unit->domain),
diff --git a/xen/common/sched/rt.c b/xen/common/sched/rt.c
index d26f77f554..6dc4b6a6e5 100644
--- a/xen/common/sched/rt.c
+++ b/xen/common/sched/rt.c
@@ -490,13 +490,13 @@ rt_update_deadline(s_time_t now, struct rt_unit *svc)
 static inline bool
 deadline_queue_remove(struct list_head *queue, struct list_head *elem)
 {
-    int pos = 0;
+    bool first = false;
 
     if ( queue->next != elem )
-        pos = 1;
+        first = true;
 
     list_del_init(elem);
-    return !pos;
+    return !first;
 }
 
 static inline bool
@@ -505,17 +505,17 @@ deadline_queue_insert(struct rt_unit * (*qelem)(struct list_head *),
                       struct list_head *queue)
 {
     struct list_head *iter;
-    int pos = 0;
+    bool first = true;
 
     list_for_each ( iter, queue )
     {
         struct rt_unit * iter_svc = (*qelem)(iter);
         if ( compare_unit_priority(svc, iter_svc) > 0 )
             break;
-        pos++;
+        first = false;
     }
     list_add_tail(elem, iter);
-    return !pos;
+    return first;
 }
 #define deadline_runq_insert(...) \
   deadline_queue_insert(&q_elem, ##__VA_ARGS__)
@@ -605,7 +605,7 @@ replq_reinsert(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct list_head *replq = rt_replq(ops);
     struct rt_unit *rearm_svc = svc;
-    bool_t rearm = 0;
+    bool rearm = false;
 
     ASSERT( unit_on_replq(svc) );
 
@@ -622,7 +622,7 @@ replq_reinsert(const struct scheduler *ops, struct rt_unit *svc)
     {
         deadline_replq_insert(svc, &svc->replq_elem, replq);
         rearm_svc = replq_elem(replq->next);
-        rearm = 1;
+        rearm = true;
     }
     else
         rearm = deadline_replq_insert(svc, &svc->replq_elem, replq);
@@ -1279,7 +1279,7 @@ rt_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct rt_unit * const svc = rt_unit(unit);
     s_time_t now;
-    bool_t missed;
+    bool missed;
 
     BUG_ON( is_idle_unit(unit) );
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index b4c2e4f7c2..d8e961095f 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -560,18 +560,18 @@ static inline bool is_system_domain(const struct domain *d)
  * Use this when you don't have an existing reference to @d. It returns
  * FALSE if @d is being destroyed.
  */
-static always_inline int get_domain(struct domain *d)
+static always_inline bool get_domain(struct domain *d)
 {
     int old, seen = atomic_read(&d->refcnt);
     do
     {
         old = seen;
         if ( unlikely(old & DOMAIN_DESTROYED) )
-            return 0;
+            return false;
         seen = atomic_cmpxchg(&d->refcnt, old, old + 1);
     }
     while ( unlikely(seen != old) );
-    return 1;
+    return true;
 }
 
 /*
-- 
2.16.4

