From: Jeff Kubascik <jeff.kubascik@dornerworks.com>
To: xen-devel@lists.xenproject.org
Cc: xen-devel@dornerworks.com,
	Josh Whitehead <josh.whitehead@dornerworks.com>,
	Stewart Hildebrand <stewart.hildebrand@dornerworks.com>,
	George Dunlap <george.dunlap@citrix.com>,
	Dario Faggioli <dfaggioli@suse.com>,
	Jeff Kubascik <jeff.kubascik@dornerworks.com>
Subject: [PATCH 4/5] sched/arinc653: Reorganize function definition order
Date: Wed, 16 Sep 2020 14:18:53 -0400
Message-ID: <20200916181854.75563-5-jeff.kubascik@dornerworks.com>
In-Reply-To: <20200916181854.75563-1-jeff.kubascik@dornerworks.com>

This change is in preparation for an overhaul of the arinc653 module. It
groups the function definitions to follow the callback order of the
sched_arinc653_def structure, and fills that structure out with explicit
NULL entries for the callbacks this scheduler does not implement. There
are no functional changes.

Signed-off-by: Jeff Kubascik <jeff.kubascik@dornerworks.com>
---
 xen/common/sched/arinc653.c | 239 +++++++++++++++++++-----------------
 1 file changed, 123 insertions(+), 116 deletions(-)

diff --git a/xen/common/sched/arinc653.c b/xen/common/sched/arinc653.c
index 5f3a1be990..0cd39d475f 100644
--- a/xen/common/sched/arinc653.c
+++ b/xen/common/sched/arinc653.c
@@ -144,96 +144,6 @@ static void update_schedule_units(const struct scheduler *ops)
                       SCHED_PRIV(ops)->schedule[i].unit_id);
 }
 
-static int a653sched_set(const struct scheduler *ops,
-                         struct xen_sysctl_arinc653_schedule *schedule)
-{
-    struct a653sched_private *sched_priv = SCHED_PRIV(ops);
-    s_time_t total_runtime = 0;
-    unsigned int i;
-    unsigned long flags;
-    int rc = -EINVAL;
-
-    spin_lock_irqsave(&sched_priv->lock, flags);
-
-    /* Check for valid major frame and number of schedule entries */
-    if ( (schedule->major_frame <= 0)
-         || (schedule->num_sched_entries < 1)
-         || (schedule->num_sched_entries > ARINC653_MAX_DOMAINS_PER_SCHEDULE) )
-        goto fail;
-
-    for ( i = 0; i < schedule->num_sched_entries; i++ )
-    {
-        /* Check for a valid run time. */
-        if ( schedule->sched_entries[i].runtime <= 0 )
-            goto fail;
-
-        /* Add this entry's run time to total run time. */
-        total_runtime += schedule->sched_entries[i].runtime;
-    }
-
-    /*
-     * Error if the major frame is not large enough to run all entries as
-     * indicated by comparing the total run time to the major frame length
-     */
-    if ( total_runtime > schedule->major_frame )
-        goto fail;
-
-    /* Copy the new schedule into place. */
-    sched_priv->num_schedule_entries = schedule->num_sched_entries;
-    sched_priv->major_frame = schedule->major_frame;
-    for ( i = 0; i < schedule->num_sched_entries; i++ )
-    {
-        memcpy(sched_priv->schedule[i].dom_handle,
-               schedule->sched_entries[i].dom_handle,
-               sizeof(sched_priv->schedule[i].dom_handle));
-        sched_priv->schedule[i].unit_id =
-            schedule->sched_entries[i].vcpu_id;
-        sched_priv->schedule[i].runtime =
-            schedule->sched_entries[i].runtime;
-    }
-    update_schedule_units(ops);
-
-    /*
-     * The newly-installed schedule takes effect immediately. We do not even
-     * wait for the current major frame to expire.
-     *
-     * Signal a new major frame to begin. The next major frame is set up by
-     * the do_schedule callback function when it is next invoked.
-     */
-    sched_priv->next_major_frame = NOW();
-
-    rc = 0;
-
- fail:
-    spin_unlock_irqrestore(&sched_priv->lock, flags);
-    return rc;
-}
-
-static int a653sched_get(const struct scheduler *ops,
-                         struct xen_sysctl_arinc653_schedule *schedule)
-{
-    struct a653sched_private *sched_priv = SCHED_PRIV(ops);
-    unsigned int i;
-    unsigned long flags;
-
-    spin_lock_irqsave(&sched_priv->lock, flags);
-
-    schedule->num_sched_entries = sched_priv->num_schedule_entries;
-    schedule->major_frame = sched_priv->major_frame;
-    for ( i = 0; i < sched_priv->num_schedule_entries; i++ )
-    {
-        memcpy(schedule->sched_entries[i].dom_handle,
-               sched_priv->schedule[i].dom_handle,
-               sizeof(sched_priv->schedule[i].dom_handle));
-        schedule->sched_entries[i].vcpu_id = sched_priv->schedule[i].unit_id;
-        schedule->sched_entries[i].runtime = sched_priv->schedule[i].runtime;
-    }
-
-    spin_unlock_irqrestore(&sched_priv->lock, flags);
-
-    return 0;
-}
-
 static int a653sched_init(struct scheduler *ops)
 {
     struct a653sched_private *prv;
@@ -257,6 +167,20 @@ static void a653sched_deinit(struct scheduler *ops)
     ops->sched_data = NULL;
 }
 
+static spinlock_t *a653sched_switch_sched(struct scheduler *new_ops,
+                                          unsigned int cpu, void *pdata,
+                                          void *vdata)
+{
+    struct sched_resource *sr = get_sched_res(cpu);
+    const struct a653sched_unit *svc = vdata;
+
+    ASSERT(!pdata && svc && is_idle_unit(svc->unit));
+
+    sched_idle_unit(cpu)->priv = vdata;
+
+    return &sr->_lock;
+}
+
 static void *a653sched_alloc_udata(const struct scheduler *ops,
                                    struct sched_unit *unit,
                                    void *dd)
@@ -356,6 +280,27 @@ static void a653sched_unit_wake(const struct scheduler *ops,
     cpu_raise_softirq(sched_unit_master(unit), SCHEDULE_SOFTIRQ);
 }
 
+static struct sched_resource *a653sched_pick_resource(const struct scheduler *ops,
+                                                      const struct sched_unit *unit)
+{
+    const cpumask_t *online;
+    unsigned int cpu;
+
+    /*
+     * If present, prefer unit's current processor, else
+     * just find the first valid unit.
+     */
+    online = cpupool_domain_master_cpumask(unit->domain);
+
+    cpu = cpumask_first(online);
+
+    if ( cpumask_test_cpu(sched_unit_master(unit), online)
+         || (cpu >= nr_cpu_ids) )
+        cpu = sched_unit_master(unit);
+
+    return get_sched_res(cpu);
+}
+
 static void a653sched_do_schedule(const struct scheduler *ops,
                                   struct sched_unit *prev, s_time_t now,
                                   bool tasklet_work_scheduled)
@@ -444,40 +389,94 @@ static void a653sched_do_schedule(const struct scheduler *ops,
     BUG_ON(prev->next_time <= 0);
 }
 
-static struct sched_resource *
-a653sched_pick_resource(const struct scheduler *ops,
-                        const struct sched_unit *unit)
+static int a653sched_set(const struct scheduler *ops,
+                         struct xen_sysctl_arinc653_schedule *schedule)
 {
-    const cpumask_t *online;
-    unsigned int cpu;
+    struct a653sched_private *sched_priv = SCHED_PRIV(ops);
+    s_time_t total_runtime = 0;
+    unsigned int i;
+    unsigned long flags;
+    int rc = -EINVAL;
+
+    spin_lock_irqsave(&sched_priv->lock, flags);
+
+    /* Check for valid major frame and number of schedule entries */
+    if ( (schedule->major_frame <= 0)
+         || (schedule->num_sched_entries < 1)
+         || (schedule->num_sched_entries > ARINC653_MAX_DOMAINS_PER_SCHEDULE) )
+        goto fail;
+
+    for ( i = 0; i < schedule->num_sched_entries; i++ )
+    {
+        /* Check for a valid run time. */
+        if ( schedule->sched_entries[i].runtime <= 0 )
+            goto fail;
+
+        /* Add this entry's run time to total run time. */
+        total_runtime += schedule->sched_entries[i].runtime;
+    }
 
     /*
-     * If present, prefer unit's current processor, else
-     * just find the first valid unit.
+     * Error if the major frame is not large enough to run all entries as
+     * indicated by comparing the total run time to the major frame length
      */
-    online = cpupool_domain_master_cpumask(unit->domain);
+    if ( total_runtime > schedule->major_frame )
+        goto fail;
 
-    cpu = cpumask_first(online);
+    /* Copy the new schedule into place. */
+    sched_priv->num_schedule_entries = schedule->num_sched_entries;
+    sched_priv->major_frame = schedule->major_frame;
+    for ( i = 0; i < schedule->num_sched_entries; i++ )
+    {
+        memcpy(sched_priv->schedule[i].dom_handle,
+               schedule->sched_entries[i].dom_handle,
+               sizeof(sched_priv->schedule[i].dom_handle));
+        sched_priv->schedule[i].unit_id =
+            schedule->sched_entries[i].vcpu_id;
+        sched_priv->schedule[i].runtime =
+            schedule->sched_entries[i].runtime;
+    }
+    update_schedule_units(ops);
 
-    if ( cpumask_test_cpu(sched_unit_master(unit), online)
-         || (cpu >= nr_cpu_ids) )
-        cpu = sched_unit_master(unit);
+    /*
+     * The newly-installed schedule takes effect immediately. We do not even
+     * wait for the current major frame to expire.
+     *
+     * Signal a new major frame to begin. The next major frame is set up by
+     * the do_schedule callback function when it is next invoked.
+     */
+    sched_priv->next_major_frame = NOW();
 
-    return get_sched_res(cpu);
+    rc = 0;
+
+ fail:
+    spin_unlock_irqrestore(&sched_priv->lock, flags);
+    return rc;
 }
 
-static spinlock_t *a653sched_switch_sched(struct scheduler *new_ops,
-                                          unsigned int cpu, void *pdata,
-                                          void *vdata)
+static int a653sched_get(const struct scheduler *ops,
+                         struct xen_sysctl_arinc653_schedule *schedule)
 {
-    struct sched_resource *sr = get_sched_res(cpu);
-    const struct a653sched_unit *svc = vdata;
+    struct a653sched_private *sched_priv = SCHED_PRIV(ops);
+    unsigned int i;
+    unsigned long flags;
 
-    ASSERT(!pdata && svc && is_idle_unit(svc->unit));
+    spin_lock_irqsave(&sched_priv->lock, flags);
 
-    sched_idle_unit(cpu)->priv = vdata;
+    schedule->num_sched_entries = sched_priv->num_schedule_entries;
+    schedule->major_frame = sched_priv->major_frame;
+    for ( i = 0; i < sched_priv->num_schedule_entries; i++ )
+    {
+        memcpy(schedule->sched_entries[i].dom_handle,
+               sched_priv->schedule[i].dom_handle,
+               sizeof(sched_priv->schedule[i].dom_handle));
+        schedule->sched_entries[i].vcpu_id = sched_priv->schedule[i].unit_id;
+        schedule->sched_entries[i].runtime = sched_priv->schedule[i].runtime;
+    }
 
-    return &sr->_lock;
+    spin_unlock_irqrestore(&sched_priv->lock, flags);
+
+    return 0;
 }
 
 static int a653sched_adjust_global(const struct scheduler *ops,
@@ -517,27 +516,35 @@ static const struct scheduler sched_arinc653_def = {
     .sched_id       = XEN_SCHEDULER_ARINC653,
     .sched_data     = NULL,
 
+    .global_init    = NULL,
     .init           = a653sched_init,
     .deinit         = a653sched_deinit,
 
-    .free_udata     = a653sched_free_udata,
-    .alloc_udata    = a653sched_alloc_udata,
+    .alloc_pdata    = NULL,
+    .switch_sched   = a653sched_switch_sched,
+    .deinit_pdata   = NULL,
+    .free_pdata     = NULL,
 
+    .alloc_domdata  = NULL,
+    .free_domdata   = NULL,
+
+    .alloc_udata    = a653sched_alloc_udata,
     .insert_unit    = NULL,
     .remove_unit    = NULL,
+    .free_udata     = a653sched_free_udata,
 
     .sleep          = a653sched_unit_sleep,
     .wake           = a653sched_unit_wake,
     .yield          = NULL,
     .context_saved  = NULL,
 
-    .do_schedule    = a653sched_do_schedule,
-
     .pick_resource  = a653sched_pick_resource,
+    .migrate        = NULL,
 
-    .switch_sched   = a653sched_switch_sched,
+    .do_schedule    = a653sched_do_schedule,
 
     .adjust         = NULL,
+    .adjust_affinity= NULL,
     .adjust_global  = a653sched_adjust_global,
 
     .dump_settings  = NULL,
-- 
2.17.1
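
For reference, the validation that a653sched_set() performs above reduces
to three checks: the major frame must be positive, there must be between 1
and ARINC653_MAX_DOMAINS_PER_SCHEDULE entries each with a positive
runtime, and the runtimes must sum to no more than the major frame. Below
is a minimal standalone sketch of just that check, assuming a simplified
entry type and an arbitrary MAX_ENTRIES stand-in for the Xen limit (the
real definitions live in the sysctl interface):

    #include <stdbool.h>
    #include <stdint.h>

    typedef int64_t s_time_t;            /* signed nanoseconds, as in Xen */

    #define MAX_ENTRIES 64               /* stand-in for the Xen limit */

    struct sched_entry {
        s_time_t runtime;
    };

    /*
     * Mirrors the checks at the top of a653sched_set(): positive major
     * frame, a sane entry count, positive per-entry runtimes, and a
     * total runtime that fits inside the major frame.
     */
    static bool schedule_is_valid(s_time_t major_frame,
                                  const struct sched_entry *entries,
                                  unsigned int nr_entries)
    {
        s_time_t total = 0;
        unsigned int i;

        if ( major_frame <= 0 || nr_entries < 1 ||
             nr_entries > MAX_ENTRIES )
            return false;

        for ( i = 0; i < nr_entries; i++ )
        {
            if ( entries[i].runtime <= 0 )
                return false;
            total += entries[i].runtime;
        }

        return total <= major_frame;
    }

Once the schedule passes these checks, a653sched_set() only has to copy
the entries into place and set next_major_frame to NOW(), so the new plan
takes effect the next time do_schedule runs rather than waiting for the
current major frame to expire.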

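A note on the fully written-out sched_arinc653_def: in C, members omitted
from a designated initializer of an object with static storage duration
are zero-initialized anyway, so the explicit NULL entries change nothing
at runtime; they only document which callbacks this scheduler deliberately
leaves unimplemented. A small self-contained illustration of that rule,
using a hypothetical ops table rather than Xen's struct scheduler:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical callback table, standing in for struct scheduler. */
    struct ops {
        int  (*init)(void);
        void (*deinit)(void);
        int  (*adjust)(int);   /* not named in the initializer below */
    };

    static int my_init(void) { return 0; }

    static const struct ops my_ops = {
        .init   = my_init,
        .deinit = NULL,        /* explicit NULL: same value, clearer intent */
        /* .adjust is implicitly zero-initialized to NULL */
    };

    int main(void)
    {
        printf("adjust is %s\n", my_ops.adjust ? "set" : "NULL");
        return 0;              /* prints "adjust is NULL" */
    }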

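The control flow in a653sched_pick_resource() is easy to misread:
cpumask_first() only provides the fallback, and the unit's current CPU
wins whenever it is still online (or when the pool's online mask is
empty, in which case cpumask_first() returns a value >= nr_cpu_ids). The
same decision, restated as a sketch with hypothetical stand-ins for Xen's
cpumask helpers:

    #include <stdbool.h>

    #define NR_CPUS 8
    #define NO_CPU  NR_CPUS   /* cpumask_first() result for an empty mask */

    /* Hypothetical stand-in for cpumask_first(). */
    static unsigned int first_online(const bool online[NR_CPUS])
    {
        unsigned int i;

        for ( i = 0; i < NR_CPUS; i++ )
            if ( online[i] )
                return i;

        return NO_CPU;
    }

    static unsigned int pick_cpu(const bool online[NR_CPUS],
                                 unsigned int current_cpu)
    {
        unsigned int fallback = first_online(online);

        /*
         * Keep the current CPU if it is still online, or if no CPU is
         * online at all; otherwise move to the first online CPU.
         */
        if ( online[current_cpu] || fallback == NO_CPU )
            return current_cpu;

        return fallback;
    }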
