From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: [PATCH v2 20/70] xen/sched: CFI hardening
Date: Mon, 14 Feb 2022 12:50:37 +0000
Message-ID: <20220214125127.17985-21-andrew.cooper3@citrix.com>
In-Reply-To: <20220214125127.17985-1-andrew.cooper3@citrix.com>

Control Flow Integrity schemes use toolchain support, and optionally hardware
support, to help protect against call/jump/return-oriented programming attacks.

Use cf_check to annotate function pointer targets for the toolchain.
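
For reference, the pattern at work is roughly the following.  This is a
minimal sketch only: it assumes GCC's cf_check function attribute as enabled
by -mmanual-endbr (which this series builds on), and the ex_* names are
invented for illustration rather than taken from the Xen tree.

    /*
     * With IBT active, cf_check expands to a toolchain attribute which
     * makes the function start with an ENDBR64 landing pad, marking it
     * as a legitimate indirect branch target.  Without IBT, it expands
     * to nothing and the generated code is unchanged.
     */
    #ifdef CONFIG_XEN_IBT
    # define cf_check __attribute__((cf_check))
    #else
    # define cf_check
    #endif

    struct ex_unit;

    struct ex_ops {
        void (*wake)(struct ex_unit *unit);
    };

    /* Only ever called directly; deliberately left unannotated, so a
     * corrupted function pointer cannot be diverted into it. */
    static void ex_helper(struct ex_unit *unit)
    {
    }

    /* Reached via ex_ops.wake, so it needs the landing pad. */
    static void cf_check ex_wake(struct ex_unit *unit)
    {
        ex_helper(unit);            /* Direct call: no ENDBR involved. */
    }

    static const struct ex_ops ex_ops = {
        .wake = ex_wake,
    };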

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Dario Faggioli <dfaggioli@suse.com>
---
 xen/common/sched/arinc653.c | 20 +++++++--------
 xen/common/sched/core.c     |  8 +++---
 xen/common/sched/credit.c   | 49 ++++++++++++++++++------------------
 xen/common/sched/credit2.c  | 51 +++++++++++++++++++-------------------
 xen/common/sched/null.c     | 60 +++++++++++++++++++++++----------------------
 xen/common/sched/rt.c       | 42 +++++++++++++++----------------
 6 files changed, 115 insertions(+), 115 deletions(-)

diff --git a/xen/common/sched/arinc653.c b/xen/common/sched/arinc653.c
index 542191822192..a82c0d7314a1 100644
--- a/xen/common/sched/arinc653.c
+++ b/xen/common/sched/arinc653.c
@@ -343,7 +343,7 @@ arinc653_sched_get(
  *                  <li> !0 = error
  *                  </ul>
  */
-static int
+static int cf_check
 a653sched_init(struct scheduler *ops)
 {
     a653sched_priv_t *prv;
@@ -366,7 +366,7 @@ a653sched_init(struct scheduler *ops)
  *
  * @param ops       Pointer to this instance of the scheduler structure
  */
-static void
+static void cf_check
 a653sched_deinit(struct scheduler *ops)
 {
     xfree(SCHED_PRIV(ops));
@@ -381,7 +381,7 @@ a653sched_deinit(struct scheduler *ops)
  *
  * @return          Pointer to the allocated data
  */
-static void *
+static void *cf_check
 a653sched_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
                       void *dd)
 {
@@ -442,7 +442,7 @@ a653sched_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
  *
  * @param ops       Pointer to this instance of the scheduler structure
  */
-static void
+static void cf_check
 a653sched_free_udata(const struct scheduler *ops, void *priv)
 {
     a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
@@ -469,7 +469,7 @@ a653sched_free_udata(const struct scheduler *ops, void *priv)
  * @param ops       Pointer to this instance of the scheduler structure
  * @param unit      Pointer to struct sched_unit
  */
-static void
+static void cf_check
 a653sched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     if ( AUNIT(unit) != NULL )
@@ -489,7 +489,7 @@ a653sched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
  * @param ops       Pointer to this instance of the scheduler structure
  * @param unit      Pointer to struct sched_unit
  */
-static void
+static void cf_check
 a653sched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     if ( AUNIT(unit) != NULL )
@@ -505,7 +505,7 @@ a653sched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
  * @param ops       Pointer to this instance of the scheduler structure
  * @param now       Current time
  */
-static void
+static void cf_check
 a653sched_do_schedule(
     const struct scheduler *ops,
     struct sched_unit *prev,
@@ -604,7 +604,7 @@ a653sched_do_schedule(
  *
  * @return          Scheduler resource to run on
  */
-static struct sched_resource *
+static struct sched_resource *cf_check
 a653sched_pick_resource(const struct scheduler *ops,
                         const struct sched_unit *unit)
 {
@@ -634,7 +634,7 @@ a653sched_pick_resource(const struct scheduler *ops,
  * @param pdata     scheduler specific PCPU data (we don't have any)
  * @param vdata     scheduler specific UNIT data of the idle unit
  */
-static spinlock_t *
+static spinlock_t *cf_check
 a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                   void *pdata, void *vdata)
 {
@@ -656,7 +656,7 @@ a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
  * @param ops       Pointer to this instance of the scheduler structure
  * @param sc        Pointer to the scheduler operation specified by Domain 0
  */
-static int
+static int cf_check
 a653sched_adjust_global(const struct scheduler *ops,
                         struct xen_sysctl_scheduler_op *sc)
 {
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index b1836b591c0a..9e09d9befa23 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -98,13 +98,13 @@ static bool scheduler_active;
 static void sched_set_affinity(
     struct sched_unit *unit, const cpumask_t *hard, const cpumask_t *soft);
 
-static struct sched_resource *
+static struct sched_resource *cf_check
 sched_idle_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 {
     return unit->res;
 }
 
-static void *
+static void *cf_check
 sched_idle_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
                        void *dd)
 {
@@ -112,12 +112,12 @@ sched_idle_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
     return ZERO_BLOCK_PTR;
 }
 
-static void
+static void cf_check
 sched_idle_free_udata(const struct scheduler *ops, void *priv)
 {
 }
 
-static void sched_idle_schedule(
+static void cf_check sched_idle_schedule(
     const struct scheduler *ops, struct sched_unit *unit, s_time_t now,
     bool tasklet_work_scheduled)
 {
diff --git a/xen/common/sched/credit.c b/xen/common/sched/credit.c
index 5635271f6fea..4d3bd8cba6fc 100644
--- a/xen/common/sched/credit.c
+++ b/xen/common/sched/credit.c
@@ -507,7 +507,7 @@ static inline void __runq_tickle(const struct csched_unit *new)
         SCHED_STAT_CRANK(tickled_no_cpu);
 }
 
-static void
+static void cf_check
 csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 {
     const struct csched_private *prv = CSCHED_PRIV(ops);
@@ -524,7 +524,7 @@ csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
     xfree(pcpu);
 }
 
-static void
+static void cf_check
 csched_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 {
     struct csched_private *prv = CSCHED_PRIV(ops);
@@ -566,7 +566,7 @@ csched_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
     spin_unlock_irqrestore(&prv->lock, flags);
 }
 
-static void *
+static void *cf_check
 csched_alloc_pdata(const struct scheduler *ops, int cpu)
 {
     struct csched_pcpu *spc;
@@ -615,7 +615,7 @@ init_pdata(struct csched_private *prv, struct csched_pcpu *spc, int cpu)
 }
 
 /* Change the scheduler of cpu to us (Credit). */
-static spinlock_t *
+static spinlock_t *cf_check
 csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                     void *pdata, void *vdata)
 {
@@ -848,7 +848,7 @@ _csched_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit,
     return cpu;
 }
 
-static struct sched_resource *
+static struct sched_resource *cf_check
 csched_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 {
     struct csched_unit *svc = CSCHED_UNIT(unit);
@@ -985,9 +985,8 @@ csched_unit_acct(struct csched_private *prv, unsigned int cpu)
     }
 }
 
-static void *
-csched_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
-                   void *dd)
+static void *cf_check csched_alloc_udata(
+    const struct scheduler *ops, struct sched_unit *unit, void *dd)
 {
     struct csched_unit *svc;
 
@@ -1007,7 +1006,7 @@ csched_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
     return svc;
 }
 
-static void
+static void cf_check
 csched_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched_unit *svc = unit->priv;
@@ -1032,7 +1031,7 @@ csched_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
     SCHED_STAT_CRANK(unit_insert);
 }
 
-static void
+static void cf_check
 csched_free_udata(const struct scheduler *ops, void *priv)
 {
     struct csched_unit *svc = priv;
@@ -1042,7 +1041,7 @@ csched_free_udata(const struct scheduler *ops, void *priv)
     xfree(svc);
 }
 
-static void
+static void cf_check
 csched_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched_private *prv = CSCHED_PRIV(ops);
@@ -1069,7 +1068,7 @@ csched_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
     BUG_ON( sdom == NULL );
 }
 
-static void
+static void cf_check
 csched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched_unit * const svc = CSCHED_UNIT(unit);
@@ -1094,7 +1093,7 @@ csched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
         runq_remove(svc);
 }
 
-static void
+static void cf_check
 csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched_unit * const svc = CSCHED_UNIT(unit);
@@ -1156,7 +1155,7 @@ csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
     __runq_tickle(svc);
 }
 
-static void
+static void cf_check
 csched_unit_yield(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched_unit * const svc = CSCHED_UNIT(unit);
@@ -1165,7 +1164,7 @@ csched_unit_yield(const struct scheduler *ops, struct sched_unit *unit)
     set_bit(CSCHED_FLAG_UNIT_YIELD, &svc->flags);
 }
 
-static int
+static int cf_check
 csched_dom_cntl(
     const struct scheduler *ops,
     struct domain *d,
@@ -1210,7 +1209,7 @@ csched_dom_cntl(
     return rc;
 }
 
-static void
+static void cf_check
 csched_aff_cntl(const struct scheduler *ops, struct sched_unit *unit,
                 const cpumask_t *hard, const cpumask_t *soft)
 {
@@ -1238,7 +1237,7 @@ __csched_set_tslice(struct csched_private *prv, unsigned int timeslice_ms)
     prv->credit = prv->credits_per_tslice * prv->ncpus;
 }
 
-static int
+static int cf_check
 csched_sys_cntl(const struct scheduler *ops,
                         struct xen_sysctl_scheduler_op *sc)
 {
@@ -1281,7 +1280,7 @@ csched_sys_cntl(const struct scheduler *ops,
     return rc;
 }
 
-static void *
+static void *cf_check
 csched_alloc_domdata(const struct scheduler *ops, struct domain *dom)
 {
     struct csched_dom *sdom;
@@ -1299,7 +1298,7 @@ csched_alloc_domdata(const struct scheduler *ops, struct domain *dom)
     return sdom;
 }
 
-static void
+static void cf_check
 csched_free_domdata(const struct scheduler *ops, void *data)
 {
     xfree(data);
@@ -1809,7 +1808,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
  * This function is in the critical path. It is designed to be simple and
  * fast for the common case.
  */
-static void csched_schedule(
+static void cf_check csched_schedule(
     const struct scheduler *ops, struct sched_unit *unit, s_time_t now,
     bool tasklet_work_scheduled)
 {
@@ -2026,7 +2025,7 @@ csched_dump_unit(const struct csched_unit *svc)
     printk("\n");
 }
 
-static void
+static void cf_check
 csched_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     const struct list_head *runq;
@@ -2079,7 +2078,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
     spin_unlock_irqrestore(&prv->lock, flags);
 }
 
-static void
+static void cf_check
 csched_dump(const struct scheduler *ops)
 {
     struct list_head *iter_sdom, *iter_svc;
@@ -2143,7 +2142,7 @@ csched_dump(const struct scheduler *ops)
     spin_unlock_irqrestore(&prv->lock, flags);
 }
 
-static int __init
+static int __init cf_check
 csched_global_init(void)
 {
     if ( sched_credit_tslice_ms > XEN_SYSCTL_CSCHED_TSLICE_MAX ||
@@ -2173,7 +2172,7 @@ csched_global_init(void)
     return 0;
 }
 
-static int
+static int cf_check
 csched_init(struct scheduler *ops)
 {
     struct csched_private *prv;
@@ -2215,7 +2214,7 @@ csched_init(struct scheduler *ops)
     return 0;
 }
 
-static void
+static void cf_check
 csched_deinit(struct scheduler *ops)
 {
     struct csched_private *prv;
diff --git a/xen/common/sched/credit2.c b/xen/common/sched/credit2.c
index d96e2749ddfb..0e3f89e5378e 100644
--- a/xen/common/sched/credit2.c
+++ b/xen/common/sched/credit2.c
@@ -2164,7 +2164,7 @@ csched2_unit_check(const struct sched_unit *unit)
 #define CSCHED2_UNIT_CHECK(unit)
 #endif
 
-static void *
+static void *cf_check
 csched2_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
                     void *dd)
 {
@@ -2208,7 +2208,7 @@ csched2_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
     return svc;
 }
 
-static void
+static void cf_check
 csched2_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched2_unit * const svc = csched2_unit(unit);
@@ -2230,7 +2230,7 @@ csched2_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
         __clear_bit(__CSFLAG_delayed_runq_add, &svc->flags);
 }
 
-static void
+static void cf_check
 csched2_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched2_unit * const svc = csched2_unit(unit);
@@ -2285,7 +2285,7 @@ csched2_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
     return;
 }
 
-static void
+static void cf_check
 csched2_unit_yield(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched2_unit * const svc = csched2_unit(unit);
@@ -2293,7 +2293,7 @@ csched2_unit_yield(const struct scheduler *ops, struct sched_unit *unit)
     __set_bit(__CSFLAG_unit_yield, &svc->flags);
 }
 
-static void
+static void cf_check
 csched2_context_saved(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched2_unit * const svc = csched2_unit(unit);
@@ -2335,7 +2335,7 @@ csched2_context_saved(const struct scheduler *ops, struct sched_unit *unit)
 }
 
 #define MAX_LOAD (STIME_MAX)
-static struct sched_resource *
+static struct sched_resource *cf_check
 csched2_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 {
     struct csched2_private *prv = csched2_priv(ops);
@@ -2867,8 +2867,7 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
     return;
 }
 
-static void
-csched2_unit_migrate(
+static void cf_check csched2_unit_migrate(
     const struct scheduler *ops, struct sched_unit *unit, unsigned int new_cpu)
 {
     struct csched2_unit * const svc = csched2_unit(unit);
@@ -2894,7 +2893,7 @@ csched2_unit_migrate(
         sched_set_res(unit, get_sched_res(new_cpu));
 }
 
-static int
+static int cf_check
 csched2_dom_cntl(
     const struct scheduler *ops,
     struct domain *d,
@@ -3100,7 +3099,7 @@ csched2_dom_cntl(
     return rc;
 }
 
-static void
+static void cf_check
 csched2_aff_cntl(const struct scheduler *ops, struct sched_unit *unit,
                  const cpumask_t *hard, const cpumask_t *soft)
 {
@@ -3116,8 +3115,8 @@ csched2_aff_cntl(const struct scheduler *ops, struct sched_unit *unit,
         __clear_bit(__CSFLAG_pinned, &svc->flags);
 }
 
-static int csched2_sys_cntl(const struct scheduler *ops,
-                            struct xen_sysctl_scheduler_op *sc)
+static int cf_check csched2_sys_cntl(
+    const struct scheduler *ops, struct xen_sysctl_scheduler_op *sc)
 {
     struct xen_sysctl_credit2_schedule *params = &sc->u.sched_credit2;
     struct csched2_private *prv = csched2_priv(ops);
@@ -3148,7 +3147,7 @@ static int csched2_sys_cntl(const struct scheduler *ops,
     return 0;
 }
 
-static void *
+static void *cf_check
 csched2_alloc_domdata(const struct scheduler *ops, struct domain *dom)
 {
     struct csched2_private *prv = csched2_priv(ops);
@@ -3180,7 +3179,7 @@ csched2_alloc_domdata(const struct scheduler *ops, struct domain *dom)
     return sdom;
 }
 
-static void
+static void cf_check
 csched2_free_domdata(const struct scheduler *ops, void *data)
 {
     struct csched2_dom *sdom = data;
@@ -3200,7 +3199,7 @@ csched2_free_domdata(const struct scheduler *ops, void *data)
     }
 }
 
-static void
+static void cf_check
 csched2_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 {
     const struct csched2_unit *svc = unit->priv;
@@ -3231,7 +3230,7 @@ csched2_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
     CSCHED2_UNIT_CHECK(unit);
 }
 
-static void
+static void cf_check
 csched2_free_udata(const struct scheduler *ops, void *priv)
 {
     struct csched2_unit *svc = priv;
@@ -3239,7 +3238,7 @@ csched2_free_udata(const struct scheduler *ops, void *priv)
     xfree(svc);
 }
 
-static void
+static void cf_check
 csched2_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched2_unit * const svc = csched2_unit(unit);
@@ -3558,7 +3557,7 @@ runq_candidate(struct csched2_runqueue_data *rqd,
  * This function is in the critical path. It is designed to be simple and
  * fast for the common case.
  */
-static void csched2_schedule(
+static void cf_check csched2_schedule(
     const struct scheduler *ops, struct sched_unit *currunit, s_time_t now,
     bool tasklet_work_scheduled)
 {
@@ -3790,7 +3789,7 @@ dump_pcpu(const struct scheduler *ops, int cpu)
     }
 }
 
-static void
+static void cf_check
 csched2_dump(const struct scheduler *ops)
 {
     struct list_head *iter_sdom;
@@ -3898,7 +3897,7 @@ csched2_dump(const struct scheduler *ops)
     read_unlock_irqrestore(&prv->lock, flags);
 }
 
-static void *
+static void *cf_check
 csched2_alloc_pdata(const struct scheduler *ops, int cpu)
 {
     struct csched2_pcpu *spc;
@@ -3988,7 +3987,7 @@ init_pdata(struct csched2_private *prv, struct csched2_pcpu *spc,
 }
 
 /* Change the scheduler of cpu to us (Credit2). */
-static spinlock_t *
+static spinlock_t *cf_check
 csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                      void *pdata, void *vdata)
 {
@@ -4026,7 +4025,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     return &rqd->lock;
 }
 
-static void
+static void cf_check
 csched2_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 {
     unsigned long flags;
@@ -4086,7 +4085,7 @@ csched2_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
     return;
 }
 
-static void
+static void cf_check
 csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 {
     struct csched2_private *prv = csched2_priv(ops);
@@ -4115,7 +4114,7 @@ csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
     xfree(pcpu);
 }
 
-static int __init
+static int __init cf_check
 csched2_global_init(void)
 {
     if ( opt_load_precision_shift < LOADAVG_PRECISION_SHIFT_MIN )
@@ -4142,7 +4141,7 @@ csched2_global_init(void)
     return 0;
 }
 
-static int
+static int cf_check
 csched2_init(struct scheduler *ops)
 {
     struct csched2_private *prv;
@@ -4190,7 +4189,7 @@ csched2_init(struct scheduler *ops)
     return 0;
 }
 
-static void
+static void cf_check
 csched2_deinit(struct scheduler *ops)
 {
     struct csched2_private *prv;
diff --git a/xen/common/sched/null.c b/xen/common/sched/null.c
index 82d5d1baab85..65a0a6c5312d 100644
--- a/xen/common/sched/null.c
+++ b/xen/common/sched/null.c
@@ -130,7 +130,7 @@ static inline bool unit_check_affinity(struct sched_unit *unit,
     return cpumask_test_cpu(cpu, cpumask_scratch_cpu(cpu));
 }
 
-static int null_init(struct scheduler *ops)
+static int cf_check null_init(struct scheduler *ops)
 {
     struct null_private *prv;
 
@@ -152,7 +152,7 @@ static int null_init(struct scheduler *ops)
     return 0;
 }
 
-static void null_deinit(struct scheduler *ops)
+static void cf_check null_deinit(struct scheduler *ops)
 {
     xfree(ops->sched_data);
     ops->sched_data = NULL;
@@ -166,7 +166,8 @@ static void init_pdata(struct null_private *prv, struct null_pcpu *npc,
     npc->unit = NULL;
 }
 
-static void null_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
+static void cf_check null_deinit_pdata(
+    const struct scheduler *ops, void *pcpu, int cpu)
 {
     struct null_private *prv = null_priv(ops);
     struct null_pcpu *npc = pcpu;
@@ -177,7 +178,7 @@ static void null_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
     npc->unit = NULL;
 }
 
-static void *null_alloc_pdata(const struct scheduler *ops, int cpu)
+static void *cf_check null_alloc_pdata(const struct scheduler *ops, int cpu)
 {
     struct null_pcpu *npc;
 
@@ -188,13 +189,14 @@ static void *null_alloc_pdata(const struct scheduler *ops, int cpu)
     return npc;
 }
 
-static void null_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
+static void cf_check null_free_pdata(
+    const struct scheduler *ops, void *pcpu, int cpu)
 {
     xfree(pcpu);
 }
 
-static void *null_alloc_udata(const struct scheduler *ops,
-                              struct sched_unit *unit, void *dd)
+static void *cf_check null_alloc_udata(
+    const struct scheduler *ops, struct sched_unit *unit, void *dd)
 {
     struct null_unit *nvc;
 
@@ -210,15 +212,15 @@ static void *null_alloc_udata(const struct scheduler *ops,
     return nvc;
 }
 
-static void null_free_udata(const struct scheduler *ops, void *priv)
+static void cf_check null_free_udata(const struct scheduler *ops, void *priv)
 {
     struct null_unit *nvc = priv;
 
     xfree(nvc);
 }
 
-static void * null_alloc_domdata(const struct scheduler *ops,
-                                 struct domain *d)
+static void *cf_check null_alloc_domdata(
+    const struct scheduler *ops, struct domain *d)
 {
     struct null_private *prv = null_priv(ops);
     struct null_dom *ndom;
@@ -237,7 +239,7 @@ static void * null_alloc_domdata(const struct scheduler *ops,
     return ndom;
 }
 
-static void null_free_domdata(const struct scheduler *ops, void *data)
+static void cf_check null_free_domdata(const struct scheduler *ops, void *data)
 {
     struct null_dom *ndom = data;
     struct null_private *prv = null_priv(ops);
@@ -426,9 +428,8 @@ static bool unit_deassign(struct null_private *prv, const struct sched_unit *uni
 }
 
 /* Change the scheduler of cpu to us (null). */
-static spinlock_t *null_switch_sched(struct scheduler *new_ops,
-                                     unsigned int cpu,
-                                     void *pdata, void *vdata)
+static spinlock_t *cf_check null_switch_sched(
+    struct scheduler *new_ops, unsigned int cpu, void *pdata, void *vdata)
 {
     struct sched_resource *sr = get_sched_res(cpu);
     struct null_private *prv = null_priv(new_ops);
@@ -450,8 +451,8 @@ static spinlock_t *null_switch_sched(struct scheduler *new_ops,
     return &sr->_lock;
 }
 
-static void null_unit_insert(const struct scheduler *ops,
-                             struct sched_unit *unit)
+static void cf_check null_unit_insert(
+    const struct scheduler *ops, struct sched_unit *unit)
 {
     struct null_private *prv = null_priv(ops);
     struct null_unit *nvc = null_unit(unit);
@@ -516,8 +517,8 @@ static void null_unit_insert(const struct scheduler *ops,
     SCHED_STAT_CRANK(unit_insert);
 }
 
-static void null_unit_remove(const struct scheduler *ops,
-                             struct sched_unit *unit)
+static void cf_check null_unit_remove(
+    const struct scheduler *ops, struct sched_unit *unit)
 {
     struct null_private *prv = null_priv(ops);
     struct null_unit *nvc = null_unit(unit);
@@ -556,8 +557,8 @@ static void null_unit_remove(const struct scheduler *ops,
     SCHED_STAT_CRANK(unit_remove);
 }
 
-static void null_unit_wake(const struct scheduler *ops,
-                           struct sched_unit *unit)
+static void cf_check null_unit_wake(
+    const struct scheduler *ops, struct sched_unit *unit)
 {
     struct null_private *prv = null_priv(ops);
     struct null_unit *nvc = null_unit(unit);
@@ -632,8 +633,8 @@ static void null_unit_wake(const struct scheduler *ops,
         cpumask_raise_softirq(cpumask_scratch_cpu(cpu), SCHEDULE_SOFTIRQ);
 }
 
-static void null_unit_sleep(const struct scheduler *ops,
-                            struct sched_unit *unit)
+static void cf_check null_unit_sleep(
+    const struct scheduler *ops, struct sched_unit *unit)
 {
     struct null_private *prv = null_priv(ops);
     unsigned int cpu = sched_unit_master(unit);
@@ -667,15 +668,15 @@ static void null_unit_sleep(const struct scheduler *ops,
     SCHED_STAT_CRANK(unit_sleep);
 }
 
-static struct sched_resource *
+static struct sched_resource *cf_check
 null_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 {
     ASSERT(!is_idle_unit(unit));
     return pick_res(null_priv(ops), unit);
 }
 
-static void null_unit_migrate(const struct scheduler *ops,
-                              struct sched_unit *unit, unsigned int new_cpu)
+static void cf_check null_unit_migrate(
+    const struct scheduler *ops, struct sched_unit *unit, unsigned int new_cpu)
 {
     struct null_private *prv = null_priv(ops);
     struct null_unit *nvc = null_unit(unit);
@@ -801,8 +802,9 @@ static inline void null_unit_check(struct sched_unit *unit)
  *  - the unit assigned to the pCPU, if there's one and it can run;
  *  - the idle unit, otherwise.
  */
-static void null_schedule(const struct scheduler *ops, struct sched_unit *prev,
-                          s_time_t now, bool tasklet_work_scheduled)
+static void cf_check null_schedule(
+    const struct scheduler *ops, struct sched_unit *prev, s_time_t now,
+    bool tasklet_work_scheduled)
 {
     unsigned int bs;
     const unsigned int cur_cpu = smp_processor_id();
@@ -939,7 +941,7 @@ static inline void dump_unit(const struct null_private *prv,
                                 sched_unit_master(nvc->unit) : -1);
 }
 
-static void null_dump_pcpu(const struct scheduler *ops, int cpu)
+static void cf_check null_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct null_private *prv = null_priv(ops);
     const struct null_pcpu *npc = get_sched_res(cpu)->sched_priv;
@@ -968,7 +970,7 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
     pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
 }
 
-static void null_dump(const struct scheduler *ops)
+static void cf_check null_dump(const struct scheduler *ops)
 {
     struct null_private *prv = null_priv(ops);
     struct list_head *iter;
diff --git a/xen/common/sched/rt.c b/xen/common/sched/rt.c
index 5ea6f01f263c..d6de25531b3c 100644
--- a/xen/common/sched/rt.c
+++ b/xen/common/sched/rt.c
@@ -269,13 +269,13 @@ unit_on_q(const struct rt_unit *svc)
    return !list_empty(&svc->q_elem);
 }
 
-static struct rt_unit *
+static struct rt_unit *cf_check
 q_elem(struct list_head *elem)
 {
     return list_entry(elem, struct rt_unit, q_elem);
 }
 
-static struct rt_unit *
+static struct rt_unit *cf_check
 replq_elem(struct list_head *elem)
 {
     return list_entry(elem, struct rt_unit, replq_elem);
@@ -348,7 +348,7 @@ rt_dump_unit(const struct scheduler *ops, const struct rt_unit *svc)
             svc->flags, CPUMASK_PR(mask));
 }
 
-static void
+static void cf_check
 rt_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct rt_private *prv = rt_priv(ops);
@@ -366,7 +366,7 @@ rt_dump_pcpu(const struct scheduler *ops, int cpu)
     spin_unlock_irqrestore(&prv->lock, flags);
 }
 
-static void
+static void cf_check
 rt_dump(const struct scheduler *ops)
 {
     struct list_head *runq, *depletedq, *replq, *iter;
@@ -636,7 +636,7 @@ replq_reinsert(const struct scheduler *ops, struct rt_unit *svc)
  * Valid resource of an unit is intesection of unit's affinity
  * and available resources
  */
-static struct sched_resource *
+static struct sched_resource *cf_check
 rt_res_pick_locked(const struct sched_unit *unit, unsigned int locked_cpu)
 {
     cpumask_t *cpus = cpumask_scratch_cpu(locked_cpu);
@@ -659,7 +659,7 @@ rt_res_pick_locked(const struct sched_unit *unit, unsigned int locked_cpu)
  * Valid resource of an unit is intesection of unit's affinity
  * and available resources
  */
-static struct sched_resource *
+static struct sched_resource *cf_check
 rt_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 {
     struct sched_resource *res;
@@ -672,7 +672,7 @@ rt_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 /*
  * Init/Free related code
  */
-static int
+static int cf_check
 rt_init(struct scheduler *ops)
 {
     int rc = -ENOMEM;
@@ -701,7 +701,7 @@ rt_init(struct scheduler *ops)
     return rc;
 }
 
-static void
+static void cf_check
 rt_deinit(struct scheduler *ops)
 {
     struct rt_private *prv = rt_priv(ops);
@@ -714,7 +714,7 @@ rt_deinit(struct scheduler *ops)
 }
 
 /* Change the scheduler of cpu to us (RTDS). */
-static spinlock_t *
+static spinlock_t *cf_check
 rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                 void *pdata, void *vdata)
 {
@@ -750,7 +750,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     return &prv->lock;
 }
 
-static void
+static void cf_check
 rt_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 {
     unsigned long flags;
@@ -782,7 +782,7 @@ rt_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
     spin_unlock_irqrestore(&prv->lock, flags);
 }
 
-static void *
+static void *cf_check
 rt_alloc_domdata(const struct scheduler *ops, struct domain *dom)
 {
     unsigned long flags;
@@ -804,7 +804,7 @@ rt_alloc_domdata(const struct scheduler *ops, struct domain *dom)
     return sdom;
 }
 
-static void
+static void cf_check
 rt_free_domdata(const struct scheduler *ops, void *data)
 {
     struct rt_dom *sdom = data;
@@ -822,7 +822,7 @@ rt_free_domdata(const struct scheduler *ops, void *data)
     }
 }
 
-static void *
+static void * cf_check
 rt_alloc_udata(const struct scheduler *ops, struct sched_unit *unit, void *dd)
 {
     struct rt_unit *svc;
@@ -850,7 +850,7 @@ rt_alloc_udata(const struct scheduler *ops, struct sched_unit *unit, void *dd)
     return svc;
 }
 
-static void
+static void cf_check
 rt_free_udata(const struct scheduler *ops, void *priv)
 {
     struct rt_unit *svc = priv;
@@ -865,7 +865,7 @@ rt_free_udata(const struct scheduler *ops, void *priv)
  * It inserts units of moving domain to the scheduler's RunQ in
  * dest. cpupool.
  */
-static void
+static void cf_check
 rt_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct rt_unit *svc = rt_unit(unit);
@@ -901,7 +901,7 @@ rt_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 /*
  * Remove rt_unit svc from the old scheduler in source cpupool.
  */
-static void
+static void cf_check
 rt_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct rt_unit * const svc = rt_unit(unit);
@@ -1042,7 +1042,7 @@ runq_pick(const struct scheduler *ops, const cpumask_t *mask, unsigned int cpu)
  * schedule function for rt scheduler.
  * The lock is already grabbed in schedule.c, no need to lock here
  */
-static void
+static void cf_check
 rt_schedule(const struct scheduler *ops, struct sched_unit *currunit,
             s_time_t now, bool tasklet_work_scheduled)
 {
@@ -1129,7 +1129,7 @@ rt_schedule(const struct scheduler *ops, struct sched_unit *currunit,
  * Remove UNIT from RunQ
  * The lock is already grabbed in schedule.c, no need to lock here
  */
-static void
+static void cf_check
 rt_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct rt_unit * const svc = rt_unit(unit);
@@ -1244,7 +1244,7 @@ runq_tickle(const struct scheduler *ops, const struct rt_unit *new)
  * The lock is already grabbed in schedule.c, no need to lock here
  * TODO: what if these two units belongs to the same domain?
  */
-static void
+static void cf_check
 rt_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct rt_unit * const svc = rt_unit(unit);
@@ -1314,7 +1314,7 @@ rt_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
  * scurr has finished context switch, insert it back to the RunQ,
  * and then pick the highest priority unit from runq to run
  */
-static void
+static void cf_check
 rt_context_saved(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct rt_unit *svc = rt_unit(unit);
@@ -1341,7 +1341,7 @@ rt_context_saved(const struct scheduler *ops, struct sched_unit *unit)
 /*
  * set/get each unit info of each domain
  */
-static int
+static int cf_check
 rt_dom_cntl(
     const struct scheduler *ops,
     struct domain *d,
-- 
2.11.0


